Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'imx-drm-next-20160301' of git://git.pengutronix.de/git/pza/linux into drm-next

imx-drm vblank IRQ control, fence support, and OF endpoint helpers

- Add and make use of drm_of_encoder_active_endpoint helpers
- Silence a noisy dev_info into a dev_dbg
- Stop touching primary fb on pageflips
- Track flip state explicitly
- Keep GEM buffer objects referenced while scanout is active
- Implement fence sync by deferring flips to a workqueue for
dma-bufs with pending fences
- Actually disable vblank IRQs while they are not needed

* tag 'imx-drm-next-20160301' of git://git.pengutronix.de/git/pza/linux:
drm/imx: only enable vblank IRQs when needed
drm/imx: implement fence sync
drm/imx: keep GEM object referenced as long as scanout is active
drm/imx: track flip state explicitly
drm/imx: don't touch primary fb on pageflip
drm/imx: ipuv3 plane: Replace dev_info with dev_dbg if a plane's CRTC changes
gpu: ipu-v3: ipu-dc: Simplify display controller microcode setup
drm/rockchip: remove rockchip_drm_encoder_get_mux_id
drm/imx: remove imx_drm_encoder_get_mux_id
drm: add drm_of_encoder_active_endpoint helpers

+222 -109
+34
drivers/gpu/drm/drm_of.c
··· 149 149 return component_master_add_with_match(dev, m_ops, match); 150 150 } 151 151 EXPORT_SYMBOL(drm_of_component_probe); 152 + 153 + /* 154 + * drm_of_encoder_active_endpoint - return the active encoder endpoint 155 + * @node: device tree node containing encoder input ports 156 + * @encoder: drm_encoder 157 + * 158 + * Given an encoder device node and a drm_encoder with a connected crtc, 159 + * parse the encoder endpoint connecting to the crtc port. 160 + */ 161 + int drm_of_encoder_active_endpoint(struct device_node *node, 162 + struct drm_encoder *encoder, 163 + struct of_endpoint *endpoint) 164 + { 165 + struct device_node *ep; 166 + struct drm_crtc *crtc = encoder->crtc; 167 + struct device_node *port; 168 + int ret; 169 + 170 + if (!node || !crtc) 171 + return -EINVAL; 172 + 173 + for_each_endpoint_of_node(node, ep) { 174 + port = of_graph_get_remote_port(ep); 175 + of_node_put(port); 176 + if (port == crtc->port) { 177 + ret = of_graph_parse_endpoint(ep, endpoint); 178 + of_node_put(ep); 179 + return ret; 180 + } 181 + } 182 + 183 + return -EINVAL; 184 + } 185 + EXPORT_SYMBOL_GPL(drm_of_encoder_active_endpoint);
+1 -1
drivers/gpu/drm/imx/dw_hdmi-imx.c
··· 118 118 static void dw_hdmi_imx_encoder_commit(struct drm_encoder *encoder) 119 119 { 120 120 struct imx_hdmi *hdmi = container_of(encoder, struct imx_hdmi, encoder); 121 - int mux = imx_drm_encoder_get_mux_id(hdmi->dev->of_node, encoder); 121 + int mux = drm_of_encoder_active_port_id(hdmi->dev->of_node, encoder); 122 122 123 123 regmap_update_bits(hdmi->regmap, IOMUXC_GPR3, 124 124 IMX6Q_GPR3_HDMI_MUX_CTL_MASK,
-31
drivers/gpu/drm/imx/imx-drm-core.c
··· 17 17 #include <linux/device.h> 18 18 #include <linux/fb.h> 19 19 #include <linux/module.h> 20 - #include <linux/of_graph.h> 21 20 #include <linux/platform_device.h> 22 21 #include <drm/drmP.h> 23 22 #include <drm/drm_fb_helper.h> ··· 410 411 return 0; 411 412 } 412 413 EXPORT_SYMBOL_GPL(imx_drm_encoder_parse_of); 413 - 414 - /* 415 - * @node: device tree node containing encoder input ports 416 - * @encoder: drm_encoder 417 - */ 418 - int imx_drm_encoder_get_mux_id(struct device_node *node, 419 - struct drm_encoder *encoder) 420 - { 421 - struct imx_drm_crtc *imx_crtc = imx_drm_find_crtc(encoder->crtc); 422 - struct device_node *ep; 423 - struct of_endpoint endpoint; 424 - struct device_node *port; 425 - int ret; 426 - 427 - if (!node || !imx_crtc) 428 - return -EINVAL; 429 - 430 - for_each_endpoint_of_node(node, ep) { 431 - port = of_graph_get_remote_port(ep); 432 - of_node_put(port); 433 - if (port == imx_crtc->crtc->port) { 434 - ret = of_graph_parse_endpoint(ep, &endpoint); 435 - of_node_put(ep); 436 - return ret ? ret : endpoint.port; 437 - } 438 - } 439 - 440 - return -EINVAL; 441 - } 442 - EXPORT_SYMBOL_GPL(imx_drm_encoder_get_mux_id); 443 414 444 415 static const struct drm_ioctl_desc imx_drm_ioctls[] = { 445 416 /* none so far */
-2
drivers/gpu/drm/imx/imx-drm.h
··· 46 46 int imx_drm_set_bus_format(struct drm_encoder *encoder, 47 47 u32 bus_format); 48 48 49 - int imx_drm_encoder_get_mux_id(struct device_node *node, 50 - struct drm_encoder *encoder); 51 49 int imx_drm_encoder_parse_of(struct drm_device *drm, 52 50 struct drm_encoder *encoder, struct device_node *np); 53 51
+3 -2
drivers/gpu/drm/imx/imx-ldb.c
··· 19 19 #include <drm/drmP.h> 20 20 #include <drm/drm_fb_helper.h> 21 21 #include <drm/drm_crtc_helper.h> 22 + #include <drm/drm_of.h> 22 23 #include <drm/drm_panel.h> 23 24 #include <linux/mfd/syscon.h> 24 25 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> ··· 209 208 struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder); 210 209 struct imx_ldb *ldb = imx_ldb_ch->ldb; 211 210 int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN; 212 - int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder); 211 + int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder); 213 212 214 213 drm_panel_prepare(imx_ldb_ch->panel); 215 214 ··· 259 258 int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN; 260 259 unsigned long serial_clk; 261 260 unsigned long di_clk = mode->clock * 1000; 262 - int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder); 261 + int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder); 263 262 264 263 if (mode->clock > 170000) { 265 264 dev_warn(ldb->dev,
+122 -11
drivers/gpu/drm/imx/ipuv3-crtc.c
··· 22 22 #include <linux/fb.h> 23 23 #include <linux/clk.h> 24 24 #include <linux/errno.h> 25 + #include <linux/reservation.h> 26 + #include <linux/dma-buf.h> 25 27 #include <drm/drm_gem_cma_helper.h> 26 28 #include <drm/drm_fb_cma_helper.h> 27 29 ··· 32 30 #include "ipuv3-plane.h" 33 31 34 32 #define DRIVER_DESC "i.MX IPUv3 Graphics" 33 + 34 + enum ipu_flip_status { 35 + IPU_FLIP_NONE, 36 + IPU_FLIP_PENDING, 37 + IPU_FLIP_SUBMITTED, 38 + }; 39 + 40 + struct ipu_flip_work { 41 + struct work_struct unref_work; 42 + struct drm_gem_object *bo; 43 + struct drm_pending_vblank_event *page_flip_event; 44 + struct work_struct fence_work; 45 + struct ipu_crtc *crtc; 46 + struct fence *excl; 47 + unsigned shared_count; 48 + struct fence **shared; 49 + }; 35 50 36 51 struct ipu_crtc { 37 52 struct device *dev; ··· 61 42 struct ipu_dc *dc; 62 43 struct ipu_di *di; 63 44 int enabled; 64 - struct drm_pending_vblank_event *page_flip_event; 65 - struct drm_framebuffer *newfb; 45 + enum ipu_flip_status flip_state; 46 + struct workqueue_struct *flip_queue; 47 + struct ipu_flip_work *flip_work; 66 48 int irq; 67 49 u32 bus_format; 68 50 int di_hsync_pin; ··· 122 102 } 123 103 } 124 104 105 + static void ipu_flip_unref_work_func(struct work_struct *__work) 106 + { 107 + struct ipu_flip_work *work = 108 + container_of(__work, struct ipu_flip_work, unref_work); 109 + 110 + drm_gem_object_unreference_unlocked(work->bo); 111 + kfree(work); 112 + } 113 + 114 + static void ipu_flip_fence_work_func(struct work_struct *__work) 115 + { 116 + struct ipu_flip_work *work = 117 + container_of(__work, struct ipu_flip_work, fence_work); 118 + int i; 119 + 120 + /* wait for all fences attached to the FB obj to signal */ 121 + if (work->excl) { 122 + fence_wait(work->excl, false); 123 + fence_put(work->excl); 124 + } 125 + for (i = 0; i < work->shared_count; i++) { 126 + fence_wait(work->shared[i], false); 127 + fence_put(work->shared[i]); 128 + } 129 + 130 + work->crtc->flip_state = 
IPU_FLIP_SUBMITTED; 131 + } 132 + 125 133 static int ipu_page_flip(struct drm_crtc *crtc, 126 134 struct drm_framebuffer *fb, 127 135 struct drm_pending_vblank_event *event, 128 136 uint32_t page_flip_flags) 129 137 { 138 + struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0); 130 139 struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); 140 + struct ipu_flip_work *flip_work; 131 141 int ret; 132 142 133 - if (ipu_crtc->newfb) 143 + if (ipu_crtc->flip_state != IPU_FLIP_NONE) 134 144 return -EBUSY; 135 145 136 146 ret = imx_drm_crtc_vblank_get(ipu_crtc->imx_crtc); ··· 171 121 return ret; 172 122 } 173 123 174 - ipu_crtc->newfb = fb; 175 - ipu_crtc->page_flip_event = event; 176 - crtc->primary->fb = fb; 124 + flip_work = kzalloc(sizeof *flip_work, GFP_KERNEL); 125 + if (!flip_work) { 126 + ret = -ENOMEM; 127 + goto put_vblank; 128 + } 129 + INIT_WORK(&flip_work->unref_work, ipu_flip_unref_work_func); 130 + flip_work->page_flip_event = event; 131 + 132 + /* get BO backing the old framebuffer and take a reference */ 133 + flip_work->bo = &drm_fb_cma_get_gem_obj(crtc->primary->fb, 0)->base; 134 + drm_gem_object_reference(flip_work->bo); 135 + 136 + ipu_crtc->flip_work = flip_work; 137 + /* 138 + * If the object has a DMABUF attached, we need to wait on its fences 139 + * if there are any. 
140 + */ 141 + if (cma_obj->base.dma_buf) { 142 + INIT_WORK(&flip_work->fence_work, ipu_flip_fence_work_func); 143 + flip_work->crtc = ipu_crtc; 144 + 145 + ret = reservation_object_get_fences_rcu( 146 + cma_obj->base.dma_buf->resv, &flip_work->excl, 147 + &flip_work->shared_count, &flip_work->shared); 148 + 149 + if (unlikely(ret)) { 150 + DRM_ERROR("failed to get fences for buffer\n"); 151 + goto free_flip_work; 152 + } 153 + 154 + /* No need to queue the worker if the are no fences */ 155 + if (!flip_work->excl && !flip_work->shared_count) { 156 + ipu_crtc->flip_state = IPU_FLIP_SUBMITTED; 157 + } else { 158 + ipu_crtc->flip_state = IPU_FLIP_PENDING; 159 + queue_work(ipu_crtc->flip_queue, 160 + &flip_work->fence_work); 161 + } 162 + } else { 163 + ipu_crtc->flip_state = IPU_FLIP_SUBMITTED; 164 + } 177 165 178 166 return 0; 167 + 168 + free_flip_work: 169 + drm_gem_object_unreference_unlocked(flip_work->bo); 170 + kfree(flip_work); 171 + ipu_crtc->flip_work = NULL; 172 + put_vblank: 173 + imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc); 174 + 175 + return ret; 179 176 } 180 177 181 178 static const struct drm_crtc_funcs ipu_crtc_funcs = { ··· 306 209 { 307 210 unsigned long flags; 308 211 struct drm_device *drm = ipu_crtc->base.dev; 212 + struct ipu_flip_work *work = ipu_crtc->flip_work; 309 213 310 214 spin_lock_irqsave(&drm->event_lock, flags); 311 - if (ipu_crtc->page_flip_event) 215 + if (work->page_flip_event) 312 216 drm_crtc_send_vblank_event(&ipu_crtc->base, 313 - ipu_crtc->page_flip_event); 314 - ipu_crtc->page_flip_event = NULL; 217 + work->page_flip_event); 315 218 imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc); 316 219 spin_unlock_irqrestore(&drm->event_lock, flags); 317 220 } ··· 322 225 323 226 imx_drm_handle_vblank(ipu_crtc->imx_crtc); 324 227 325 - if (ipu_crtc->newfb) { 228 + if (ipu_crtc->flip_state == IPU_FLIP_SUBMITTED) { 326 229 struct ipu_plane *plane = ipu_crtc->plane[0]; 327 230 328 - ipu_crtc->newfb = NULL; 329 231 ipu_plane_set_base(plane, 
ipu_crtc->base.primary->fb, 330 232 plane->x, plane->y); 331 233 ipu_crtc_handle_pageflip(ipu_crtc); 234 + queue_work(ipu_crtc->flip_queue, 235 + &ipu_crtc->flip_work->unref_work); 236 + ipu_crtc->flip_state = IPU_FLIP_NONE; 332 237 } 333 238 334 239 return IRQ_HANDLED; ··· 379 280 380 281 static int ipu_enable_vblank(struct drm_crtc *crtc) 381 282 { 283 + struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); 284 + 285 + enable_irq(ipu_crtc->irq); 286 + 382 287 return 0; 383 288 } 384 289 385 290 static void ipu_disable_vblank(struct drm_crtc *crtc) 386 291 { 292 + struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); 293 + 294 + disable_irq_nosync(ipu_crtc->irq); 387 295 } 388 296 389 297 static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc, ··· 501 395 dev_err(ipu_crtc->dev, "irq request failed with %d.\n", ret); 502 396 goto err_put_plane_res; 503 397 } 398 + /* Only enable IRQ when we actually need it to trigger work. */ 399 + disable_irq(ipu_crtc->irq); 400 + 401 + ipu_crtc->flip_queue = create_singlethread_workqueue("ipu-crtc-flip"); 504 402 505 403 return 0; 506 404 ··· 547 437 548 438 imx_drm_remove_crtc(ipu_crtc->imx_crtc); 549 439 440 + destroy_workqueue(ipu_crtc->flip_queue); 550 441 ipu_plane_put_resources(ipu_crtc->plane[0]); 551 442 ipu_put_resources(ipu_crtc); 552 443 }
+1 -1
drivers/gpu/drm/imx/ipuv3-plane.c
··· 338 338 } 339 339 340 340 if (crtc != plane->crtc) 341 - dev_info(plane->dev->dev, "crtc change: %p -> %p\n", 341 + dev_dbg(plane->dev->dev, "crtc change: %p -> %p\n", 342 342 plane->crtc, crtc); 343 343 plane->crtc = crtc; 344 344
+1 -1
drivers/gpu/drm/rockchip/dw-mipi-dsi.c
··· 878 878 static void dw_mipi_dsi_encoder_commit(struct drm_encoder *encoder) 879 879 { 880 880 struct dw_mipi_dsi *dsi = encoder_to_dsi(encoder); 881 - int mux = rockchip_drm_encoder_get_mux_id(dsi->dev->of_node, encoder); 881 + int mux = drm_of_encoder_active_endpoint_id(dsi->dev->of_node, encoder); 882 882 u32 interface_pix_fmt; 883 883 u32 val; 884 884
+1 -1
drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
··· 204 204 rockchip_drm_crtc_mode_config(encoder->crtc, DRM_MODE_CONNECTOR_HDMIA, 205 205 ROCKCHIP_OUT_MODE_AAAA); 206 206 207 - mux = rockchip_drm_encoder_get_mux_id(hdmi->dev->of_node, encoder); 207 + mux = drm_of_encoder_active_endpoint_id(hdmi->dev->of_node, encoder); 208 208 if (mux) 209 209 val = HDMI_SEL_VOP_LIT | (HDMI_SEL_VOP_LIT << 16); 210 210 else
-30
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
··· 384 384 rockchip_drm_sys_resume) 385 385 }; 386 386 387 - /* 388 - * @node: device tree node containing encoder input ports 389 - * @encoder: drm_encoder 390 - */ 391 - int rockchip_drm_encoder_get_mux_id(struct device_node *node, 392 - struct drm_encoder *encoder) 393 - { 394 - struct device_node *ep; 395 - struct drm_crtc *crtc = encoder->crtc; 396 - struct of_endpoint endpoint; 397 - struct device_node *port; 398 - int ret; 399 - 400 - if (!node || !crtc) 401 - return -EINVAL; 402 - 403 - for_each_endpoint_of_node(node, ep) { 404 - port = of_graph_get_remote_port(ep); 405 - of_node_put(port); 406 - if (port == crtc->port) { 407 - ret = of_graph_parse_endpoint(ep, &endpoint); 408 - of_node_put(ep); 409 - return ret ?: endpoint.id; 410 - } 411 - } 412 - 413 - return -EINVAL; 414 - } 415 - EXPORT_SYMBOL_GPL(rockchip_drm_encoder_get_mux_id); 416 - 417 387 static int compare_of(struct device *dev, void *data) 418 388 { 419 389 struct device_node *np = data;
-2
drivers/gpu/drm/rockchip/rockchip_drm_drv.h
··· 67 67 int rockchip_register_crtc_funcs(struct drm_crtc *crtc, 68 68 const struct rockchip_crtc_funcs *crtc_funcs); 69 69 void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc); 70 - int rockchip_drm_encoder_get_mux_id(struct device_node *node, 71 - struct drm_encoder *encoder); 72 70 int rockchip_drm_crtc_mode_config(struct drm_crtc *crtc, int connector_type, 73 71 int out_mode); 74 72 int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
+26 -27
drivers/gpu/ipu-v3/ipu-dc.c
··· 171 171 u32 bus_format, u32 width) 172 172 { 173 173 struct ipu_dc_priv *priv = dc->priv; 174 + int addr, sync; 174 175 u32 reg = 0; 175 176 int map; 176 177 ··· 183 182 return map; 184 183 } 185 184 185 + /* 186 + * In interlaced mode we need more counters to create the asymmetric 187 + * per-field VSYNC signals. The pixel active signal synchronising DC 188 + * to DI moves to signal generator #6 (see ipu-di.c). In progressive 189 + * mode counter #5 is used. 190 + */ 191 + sync = interlaced ? 6 : 5; 192 + 193 + /* Reserve 5 microcode template words for each DI */ 194 + if (dc->di) 195 + addr = 5; 196 + else 197 + addr = 0; 198 + 186 199 if (interlaced) { 187 - int addr; 188 - 189 - if (dc->di) 190 - addr = 1; 191 - else 192 - addr = 0; 193 - 194 200 dc_link_event(dc, DC_EVT_NL, addr, 3); 195 201 dc_link_event(dc, DC_EVT_EOL, addr, 2); 196 202 dc_link_event(dc, DC_EVT_NEW_DATA, addr, 1); 197 203 198 204 /* Init template microcode */ 199 - dc_write_tmpl(dc, addr, WROD(0), 0, map, SYNC_WAVE, 0, 6, 1); 205 + dc_write_tmpl(dc, addr, WROD(0), 0, map, SYNC_WAVE, 0, sync, 1); 200 206 } else { 201 - if (dc->di) { 202 - dc_link_event(dc, DC_EVT_NL, 2, 3); 203 - dc_link_event(dc, DC_EVT_EOL, 3, 2); 204 - dc_link_event(dc, DC_EVT_NEW_DATA, 1, 1); 205 - /* Init template microcode */ 206 - dc_write_tmpl(dc, 2, WROD(0), 0, map, SYNC_WAVE, 8, 5, 1); 207 - dc_write_tmpl(dc, 3, WROD(0), 0, map, SYNC_WAVE, 4, 5, 0); 208 - dc_write_tmpl(dc, 4, WRG, 0, map, NULL_WAVE, 0, 0, 1); 209 - dc_write_tmpl(dc, 1, WROD(0), 0, map, SYNC_WAVE, 0, 5, 1); 210 - } else { 211 - dc_link_event(dc, DC_EVT_NL, 5, 3); 212 - dc_link_event(dc, DC_EVT_EOL, 6, 2); 213 - dc_link_event(dc, DC_EVT_NEW_DATA, 8, 1); 214 - /* Init template microcode */ 215 - dc_write_tmpl(dc, 5, WROD(0), 0, map, SYNC_WAVE, 8, 5, 1); 216 - dc_write_tmpl(dc, 6, WROD(0), 0, map, SYNC_WAVE, 4, 5, 0); 217 - dc_write_tmpl(dc, 7, WRG, 0, map, NULL_WAVE, 0, 0, 1); 218 - dc_write_tmpl(dc, 8, WROD(0), 0, map, SYNC_WAVE, 0, 5, 1); 219 - } 
207 + dc_link_event(dc, DC_EVT_NL, addr + 2, 3); 208 + dc_link_event(dc, DC_EVT_EOL, addr + 3, 2); 209 + dc_link_event(dc, DC_EVT_NEW_DATA, addr + 1, 1); 210 + 211 + /* Init template microcode */ 212 + dc_write_tmpl(dc, addr + 2, WROD(0), 0, map, SYNC_WAVE, 8, sync, 1); 213 + dc_write_tmpl(dc, addr + 3, WROD(0), 0, map, SYNC_WAVE, 4, sync, 0); 214 + dc_write_tmpl(dc, addr + 4, WRG, 0, map, NULL_WAVE, 0, 0, 1); 215 + dc_write_tmpl(dc, addr + 1, WROD(0), 0, map, SYNC_WAVE, 0, sync, 1); 220 216 } 217 + 221 218 dc_link_event(dc, DC_EVT_NF, 0, 0); 222 219 dc_link_event(dc, DC_EVT_NFIELD, 0, 0); 223 220 dc_link_event(dc, DC_EVT_EOF, 0, 0);
+33
include/drm/drm_of.h
··· 1 1 #ifndef __DRM_OF_H__ 2 2 #define __DRM_OF_H__ 3 3 4 + #include <linux/of_graph.h> 5 + 4 6 struct component_master_ops; 5 7 struct device; 6 8 struct drm_device; 9 + struct drm_encoder; 7 10 struct device_node; 8 11 9 12 #ifdef CONFIG_OF ··· 15 12 extern int drm_of_component_probe(struct device *dev, 16 13 int (*compare_of)(struct device *, void *), 17 14 const struct component_master_ops *m_ops); 15 + extern int drm_of_encoder_active_endpoint(struct device_node *node, 16 + struct drm_encoder *encoder, 17 + struct of_endpoint *endpoint); 18 18 #else 19 19 static inline uint32_t drm_of_find_possible_crtcs(struct drm_device *dev, 20 20 struct device_node *port) ··· 32 26 { 33 27 return -EINVAL; 34 28 } 29 + 30 + static inline int drm_of_encoder_active_endpoint(struct device_node *node, 31 + struct drm_encoder *encoder, 32 + struct of_endpoint *endpoint) 33 + { 34 + return -EINVAL; 35 + } 35 36 #endif 37 + 38 + static inline int drm_of_encoder_active_endpoint_id(struct device_node *node, 39 + struct drm_encoder *encoder) 40 + { 41 + struct of_endpoint endpoint; 42 + int ret = drm_of_encoder_active_endpoint(node, encoder, 43 + &endpoint); 44 + 45 + return ret ?: endpoint.id; 46 + } 47 + 48 + static inline int drm_of_encoder_active_port_id(struct device_node *node, 49 + struct drm_encoder *encoder) 50 + { 51 + struct of_endpoint endpoint; 52 + int ret = drm_of_encoder_active_endpoint(node, encoder, 53 + &endpoint); 54 + 55 + return ret ?: endpoint.port; 56 + } 36 57 37 58 #endif /* __DRM_OF_H__ */