Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v6.7-rc3 · 1117 lines · 31 kB

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 MediaTek Inc.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/mailbox_controller.h>
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
#include <linux/soc/mediatek/mtk-mutex.h>

#include <asm/barrier.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "mtk_drm_drv.h"
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_gem.h"
#include "mtk_drm_plane.h"

/*
 * struct mtk_drm_crtc - MediaTek specific crtc structure.
 * @base: crtc object.
 * @enabled: records whether crtc_enable succeeded
 * @planes: array of 4 drm_plane structures, one for each overlay plane
 * @pending_planes: whether any plane has pending changes to be applied
 * @mmsys_dev: pointer to the mmsys device for configuration registers
 * @mutex: handle to one of the ten disp_mutex streams
 * @ddp_comp_nr: number of components in ddp_comp
 * @ddp_comp: array of pointers to the mtk_ddp_comp structures used by this crtc
 *
 * TODO: Needs update: this header is missing a bunch of member descriptions.
 */
struct mtk_drm_crtc {
	struct drm_crtc			base;
	bool				enabled;

	bool				pending_needs_vblank;
	struct drm_pending_vblank_event	*event;

	struct drm_plane		*planes;
	unsigned int			layer_nr;
	bool				pending_planes;
	bool				pending_async_planes;

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	struct cmdq_client		cmdq_client;
	struct cmdq_pkt			cmdq_handle;
	u32				cmdq_event;
	u32				cmdq_vblank_cnt;
	wait_queue_head_t		cb_blocking_queue;
#endif

	struct device			*mmsys_dev;
	struct device			*dma_dev;
	struct mtk_mutex		*mutex;
	unsigned int			ddp_comp_nr;
	struct mtk_ddp_comp		**ddp_comp;
	unsigned int			num_conn_routes;
	const struct mtk_drm_route	*conn_routes;

	/* lock for display hardware access */
	struct mutex			hw_lock;
	bool				config_updating;
};

struct mtk_crtc_state {
	struct drm_crtc_state		base;

	bool				pending_config;
	unsigned int			pending_width;
	unsigned int			pending_height;
	unsigned int			pending_vrefresh;
};

static inline struct mtk_drm_crtc *to_mtk_crtc(struct drm_crtc *c)
{
	return container_of(c, struct mtk_drm_crtc, base);
}

static inline struct mtk_crtc_state *to_mtk_crtc_state(struct drm_crtc_state *s)
{
	return container_of(s, struct mtk_crtc_state, base);
}

static void mtk_drm_crtc_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
{
	struct drm_crtc *crtc = &mtk_crtc->base;
	unsigned long flags;

	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	drm_crtc_send_vblank_event(crtc, mtk_crtc->event);
	drm_crtc_vblank_put(crtc);
	mtk_crtc->event = NULL;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}

static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
{
	drm_crtc_handle_vblank(&mtk_crtc->base);
	if (!mtk_crtc->config_updating && mtk_crtc->pending_needs_vblank) {
		mtk_drm_crtc_finish_page_flip(mtk_crtc);
		mtk_crtc->pending_needs_vblank = false;
	}
}
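
/*
 * Helpers for the lifetime of a CMDQ (GCE command queue) packet: the
 * command buffer is allocated on the CPU side and then mapped so the
 * GCE hardware can fetch the commands from it via DMA.
 */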
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
static int mtk_drm_cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt,
				   size_t size)
{
	struct device *dev;
	dma_addr_t dma_addr;

	pkt->va_base = kzalloc(size, GFP_KERNEL);
	if (!pkt->va_base)
		return -ENOMEM;

	pkt->buf_size = size;
	pkt->cl = (void *)client;

	dev = client->chan->mbox->dev;
	dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
		kfree(pkt->va_base);
		return -ENOMEM;
	}

	pkt->pa_base = dma_addr;

	return 0;
}

static void mtk_drm_cmdq_pkt_destroy(struct cmdq_pkt *pkt)
{
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
			 DMA_TO_DEVICE);
	kfree(pkt->va_base);
}
#endif

static void mtk_drm_crtc_destroy(struct drm_crtc *crtc)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	int i;

	mtk_mutex_put(mtk_crtc->mutex);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	mtk_drm_cmdq_pkt_destroy(&mtk_crtc->cmdq_handle);

	if (mtk_crtc->cmdq_client.chan) {
		mbox_free_channel(mtk_crtc->cmdq_client.chan);
		mtk_crtc->cmdq_client.chan = NULL;
	}
#endif

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		struct mtk_ddp_comp *comp;

		comp = mtk_crtc->ddp_comp[i];
		mtk_ddp_comp_unregister_vblank_cb(comp);
	}

	drm_crtc_cleanup(crtc);
}

static void mtk_drm_crtc_reset(struct drm_crtc *crtc)
{
	struct mtk_crtc_state *state;

	if (crtc->state)
		__drm_atomic_helper_crtc_destroy_state(crtc->state);

	kfree(to_mtk_crtc_state(crtc->state));
	crtc->state = NULL;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state)
		__drm_atomic_helper_crtc_reset(crtc, &state->base);
}

static struct drm_crtc_state *mtk_drm_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct mtk_crtc_state *state;

	state = kmalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

	WARN_ON(state->base.crtc != crtc);
	state->base.crtc = crtc;
	state->pending_config = false;

	return &state->base;
}

static void mtk_drm_crtc_destroy_state(struct drm_crtc *crtc,
				       struct drm_crtc_state *state)
{
	__drm_atomic_helper_crtc_destroy_state(state);
	kfree(to_mtk_crtc_state(state));
}

static bool mtk_drm_crtc_mode_fixup(struct drm_crtc *crtc,
				    const struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode)
{
	/* Nothing to do here, but this callback is mandatory. */
	return true;
}

static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mtk_crtc_state *state = to_mtk_crtc_state(crtc->state);

	state->pending_width = crtc->mode.hdisplay;
	state->pending_height = crtc->mode.vdisplay;
	state->pending_vrefresh = drm_mode_vrefresh(&crtc->mode);
	wmb();	/* Make sure the above parameters are set before update */
	state->pending_config = true;
}

static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
{
	int ret;
	int i;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		ret = mtk_ddp_comp_clk_enable(mtk_crtc->ddp_comp[i]);
		if (ret) {
			DRM_ERROR("Failed to enable clock %d: %d\n", i, ret);
			goto err;
		}
	}

	return 0;
err:
	while (--i >= 0)
		mtk_ddp_comp_clk_disable(mtk_crtc->ddp_comp[i]);
	return ret;
}

static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc)
{
	int i;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
		mtk_ddp_comp_clk_disable(mtk_crtc->ddp_comp[i]);
}

static
struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
						struct drm_plane *plane,
						unsigned int *local_layer)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp;
	int i, count = 0;
	unsigned int local_index = plane - mtk_crtc->planes;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		comp = mtk_crtc->ddp_comp[i];
		if (local_index < (count + mtk_ddp_comp_layer_nr(comp))) {
			*local_layer = local_index - count;
			return comp;
		}
		count += mtk_ddp_comp_layer_nr(comp);
	}

	WARN(1, "Failed to find component for plane %d\n", plane->index);
	return NULL;
}

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
{
	struct cmdq_cb_data *data = mssg;
	struct cmdq_client *cmdq_cl = container_of(cl, struct cmdq_client, client);
	struct mtk_drm_crtc *mtk_crtc = container_of(cmdq_cl, struct mtk_drm_crtc, cmdq_client);
	struct mtk_crtc_state *state;
	unsigned int i;

	if (data->sta < 0)
		return;

	state = to_mtk_crtc_state(mtk_crtc->base.state);

	state->pending_config = false;

	if (mtk_crtc->pending_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			plane_state->pending.config = false;
		}
		mtk_crtc->pending_planes = false;
	}

	if (mtk_crtc->pending_async_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			plane_state->pending.async_config = false;
		}
		mtk_crtc->pending_async_planes = false;
	}

	mtk_crtc->cmdq_vblank_cnt = 0;
	wake_up(&mtk_crtc->cb_blocking_queue);
}
#endif
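
/*
 * Bring up the display path for this CRTC: resume the power domain,
 * prepare the disp_mutex and component clocks, connect the DDP
 * components in path order, then configure and start each component.
 * The lowest bpc reported by a connected connector is used, defaulting
 * to MTK_MAX_BPC when none reports one.
 */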
static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
{
	struct drm_crtc *crtc = &mtk_crtc->base;
	struct drm_connector *connector;
	struct drm_encoder *encoder;
	struct drm_connector_list_iter conn_iter;
	unsigned int width, height, vrefresh, bpc = MTK_MAX_BPC;
	int ret;
	int i;

	if (WARN_ON(!crtc->state))
		return -EINVAL;

	width = crtc->state->adjusted_mode.hdisplay;
	height = crtc->state->adjusted_mode.vdisplay;
	vrefresh = drm_mode_vrefresh(&crtc->state->adjusted_mode);

	drm_for_each_encoder(encoder, crtc->dev) {
		if (encoder->crtc != crtc)
			continue;

		drm_connector_list_iter_begin(crtc->dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			if (connector->encoder != encoder)
				continue;
			if (connector->display_info.bpc != 0 &&
			    bpc > connector->display_info.bpc)
				bpc = connector->display_info.bpc;
		}
		drm_connector_list_iter_end(&conn_iter);
	}

	ret = pm_runtime_resume_and_get(crtc->dev->dev);
	if (ret < 0) {
		DRM_ERROR("Failed to enable power domain: %d\n", ret);
		return ret;
	}

	ret = mtk_mutex_prepare(mtk_crtc->mutex);
	if (ret < 0) {
		DRM_ERROR("Failed to enable mutex clock: %d\n", ret);
		goto err_pm_runtime_put;
	}

	ret = mtk_crtc_ddp_clk_enable(mtk_crtc);
	if (ret < 0) {
		DRM_ERROR("Failed to enable component clocks: %d\n", ret);
		goto err_mutex_unprepare;
	}

	for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
		if (!mtk_ddp_comp_connect(mtk_crtc->ddp_comp[i], mtk_crtc->mmsys_dev,
					  mtk_crtc->ddp_comp[i + 1]->id))
			mtk_mmsys_ddp_connect(mtk_crtc->mmsys_dev,
					      mtk_crtc->ddp_comp[i]->id,
					      mtk_crtc->ddp_comp[i + 1]->id);
		if (!mtk_ddp_comp_add(mtk_crtc->ddp_comp[i], mtk_crtc->mutex))
			mtk_mutex_add_comp(mtk_crtc->mutex,
					   mtk_crtc->ddp_comp[i]->id);
	}
	if (!mtk_ddp_comp_add(mtk_crtc->ddp_comp[i], mtk_crtc->mutex))
		mtk_mutex_add_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
	mtk_mutex_enable(mtk_crtc->mutex);

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[i];

		if (i == 1)
			mtk_ddp_comp_bgclr_in_on(comp);

		mtk_ddp_comp_config(comp, width, height, vrefresh, bpc, NULL);
		mtk_ddp_comp_start(comp);
	}

	/* Initially configure all planes */
	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		struct drm_plane *plane = &mtk_crtc->planes[i];
		struct mtk_plane_state *plane_state;
		struct mtk_ddp_comp *comp;
		unsigned int local_layer;

		plane_state = to_mtk_plane_state(plane->state);

		/* should not enable layer before crtc enabled */
		plane_state->pending.enable = false;
		comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
		if (comp)
			mtk_ddp_comp_layer_config(comp, local_layer,
						  plane_state, NULL);
	}

	return 0;

err_mutex_unprepare:
	mtk_mutex_unprepare(mtk_crtc->mutex);
err_pm_runtime_put:
	pm_runtime_put(crtc->dev->dev);
	return ret;
}
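
/*
 * Tear down the display path in roughly the reverse order of
 * mtk_crtc_ddp_hw_init(): stop the components, detach them from the
 * disp_mutex, disconnect the path and release clocks and power.
 */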
static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
{
	struct drm_device *drm = mtk_crtc->base.dev;
	struct drm_crtc *crtc = &mtk_crtc->base;
	int i;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		mtk_ddp_comp_stop(mtk_crtc->ddp_comp[i]);
		if (i == 1)
			mtk_ddp_comp_bgclr_in_off(mtk_crtc->ddp_comp[i]);
	}

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
		if (!mtk_ddp_comp_remove(mtk_crtc->ddp_comp[i], mtk_crtc->mutex))
			mtk_mutex_remove_comp(mtk_crtc->mutex,
					      mtk_crtc->ddp_comp[i]->id);
	mtk_mutex_disable(mtk_crtc->mutex);
	for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
		if (!mtk_ddp_comp_disconnect(mtk_crtc->ddp_comp[i], mtk_crtc->mmsys_dev,
					     mtk_crtc->ddp_comp[i + 1]->id))
			mtk_mmsys_ddp_disconnect(mtk_crtc->mmsys_dev,
						 mtk_crtc->ddp_comp[i]->id,
						 mtk_crtc->ddp_comp[i + 1]->id);
		if (!mtk_ddp_comp_remove(mtk_crtc->ddp_comp[i], mtk_crtc->mutex))
			mtk_mutex_remove_comp(mtk_crtc->mutex,
					      mtk_crtc->ddp_comp[i]->id);
	}
	if (!mtk_ddp_comp_remove(mtk_crtc->ddp_comp[i], mtk_crtc->mutex))
		mtk_mutex_remove_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
	mtk_crtc_ddp_clk_disable(mtk_crtc);
	mtk_mutex_unprepare(mtk_crtc->mutex);

	pm_runtime_put(drm->dev);

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irq(&crtc->dev->event_lock);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irq(&crtc->dev->event_lock);
	}
}

static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
				struct cmdq_pkt *cmdq_handle)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
	unsigned int i;
	unsigned int local_layer;

	/*
	 * TODO: instead of updating the registers here, we should prepare
	 * working registers in atomic_commit and let the hardware command
	 * queue update module registers on vblank.
	 */
	if (state->pending_config) {
		mtk_ddp_comp_config(comp, state->pending_width,
				    state->pending_height,
				    state->pending_vrefresh, 0,
				    cmdq_handle);

		if (!cmdq_handle)
			state->pending_config = false;
	}

	if (mtk_crtc->pending_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			if (!plane_state->pending.config)
				continue;

			comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
							  &local_layer);

			if (comp)
				mtk_ddp_comp_layer_config(comp, local_layer,
							  plane_state,
							  cmdq_handle);
			if (!cmdq_handle)
				plane_state->pending.config = false;
		}

		if (!cmdq_handle)
			mtk_crtc->pending_planes = false;
	}

	if (mtk_crtc->pending_async_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			if (!plane_state->pending.async_config)
				continue;

			comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
							  &local_layer);

			if (comp)
				mtk_ddp_comp_layer_config(comp, local_layer,
							  plane_state,
							  cmdq_handle);
			if (!cmdq_handle)
				plane_state->pending.async_config = false;
		}

		if (!cmdq_handle)
			mtk_crtc->pending_async_planes = false;
	}
}
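
/*
 * Latch dirty plane state and push the pending configuration to the
 * hardware: either directly (optionally under the shadow-register
 * mutex), or by rebuilding the CMDQ packet that the GCE executes once
 * the configured hardware event fires.
 */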
static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
				       bool needs_vblank)
{
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	struct cmdq_pkt *cmdq_handle = &mtk_crtc->cmdq_handle;
#endif
	struct drm_crtc *crtc = &mtk_crtc->base;
	struct mtk_drm_private *priv = crtc->dev->dev_private;
	unsigned int pending_planes = 0, pending_async_planes = 0;
	int i;

	mutex_lock(&mtk_crtc->hw_lock);
	mtk_crtc->config_updating = true;
	if (needs_vblank)
		mtk_crtc->pending_needs_vblank = true;

	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		struct drm_plane *plane = &mtk_crtc->planes[i];
		struct mtk_plane_state *plane_state;

		plane_state = to_mtk_plane_state(plane->state);
		if (plane_state->pending.dirty) {
			plane_state->pending.config = true;
			plane_state->pending.dirty = false;
			pending_planes |= BIT(i);
		} else if (plane_state->pending.async_dirty) {
			plane_state->pending.async_config = true;
			plane_state->pending.async_dirty = false;
			pending_async_planes |= BIT(i);
		}
	}
	if (pending_planes)
		mtk_crtc->pending_planes = true;
	if (pending_async_planes)
		mtk_crtc->pending_async_planes = true;

	if (priv->data->shadow_register) {
		mtk_mutex_acquire(mtk_crtc->mutex);
		mtk_crtc_ddp_config(crtc, NULL);
		mtk_mutex_release(mtk_crtc->mutex);
	}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	if (mtk_crtc->cmdq_client.chan) {
		mbox_flush(mtk_crtc->cmdq_client.chan, 2000);
		cmdq_handle->cmd_buf_size = 0;
		cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
		cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
		mtk_crtc_ddp_config(crtc, cmdq_handle);
		cmdq_pkt_finalize(cmdq_handle);
		dma_sync_single_for_device(mtk_crtc->cmdq_client.chan->mbox->dev,
					   cmdq_handle->pa_base,
					   cmdq_handle->cmd_buf_size,
					   DMA_TO_DEVICE);
		/*
		 * A CMDQ command is expected to execute within the next
		 * three vblanks: occasionally one vblank interrupt fires
		 * before the message is sent, and one more arrives after
		 * CMDQ completes, so if the command has not executed after
		 * three vblank interrupts it is treated as a timeout.
		 */
		mtk_crtc->cmdq_vblank_cnt = 3;

		mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle);
		mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0);
	}
#endif
	mtk_crtc->config_updating = false;
	mutex_unlock(&mtk_crtc->hw_lock);
}

static void mtk_crtc_ddp_irq(void *data)
{
	struct drm_crtc *crtc = data;
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_drm_private *priv = crtc->dev->dev_private;

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	if (!priv->data->shadow_register && !mtk_crtc->cmdq_client.chan)
		mtk_crtc_ddp_config(crtc, NULL);
	else if (mtk_crtc->cmdq_vblank_cnt > 0 && --mtk_crtc->cmdq_vblank_cnt == 0)
		DRM_ERROR("mtk_crtc %d CMDQ execute command timeout!\n",
			  drm_crtc_index(&mtk_crtc->base));
#else
	if (!priv->data->shadow_register)
		mtk_crtc_ddp_config(crtc, NULL);
#endif
	mtk_drm_finish_page_flip(mtk_crtc);
}

static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];

	mtk_ddp_comp_enable_vblank(comp);

	return 0;
}

static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];

	mtk_ddp_comp_disable_vblank(comp);
}
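
/*
 * When the connector routing has changed, pick the route component whose
 * encoder index matches the new encoder mask and install it as the last
 * component of this CRTC's path.
 */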
static void mtk_drm_crtc_update_output(struct drm_crtc *crtc,
				       struct drm_atomic_state *state)
{
	int crtc_index = drm_crtc_index(crtc);
	int i;
	struct device *dev;
	struct drm_crtc_state *crtc_state = state->crtcs[crtc_index].new_state;
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_drm_private *priv;
	unsigned int encoder_mask = crtc_state->encoder_mask;

	if (!crtc_state->connectors_changed)
		return;

	if (!mtk_crtc->num_conn_routes)
		return;

	priv = ((struct mtk_drm_private *)crtc->dev->dev_private)->all_drm_private[crtc_index];
	dev = priv->dev;

	dev_dbg(dev, "connector change:%d, encoder mask:0x%x for crtc:%d\n",
		crtc_state->connectors_changed, encoder_mask, crtc_index);

	for (i = 0; i < mtk_crtc->num_conn_routes; i++) {
		unsigned int comp_id = mtk_crtc->conn_routes[i].route_ddp;
		struct mtk_ddp_comp *comp = &priv->ddp_comp[comp_id];

		if (comp->encoder_index >= 0 &&
		    (encoder_mask & BIT(comp->encoder_index))) {
			mtk_crtc->ddp_comp[mtk_crtc->ddp_comp_nr - 1] = comp;
			dev_dbg(dev, "Add comp_id: %d at path index %d\n",
				comp->id, mtk_crtc->ddp_comp_nr - 1);
			break;
		}
	}
}

int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
			     struct mtk_plane_state *state)
{
	unsigned int local_layer;
	struct mtk_ddp_comp *comp;

	comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
	if (comp)
		return mtk_ddp_comp_layer_check(comp, local_layer, state);
	return 0;
}

void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
			       struct drm_atomic_state *state)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);

	if (!mtk_crtc->enabled)
		return;

	mtk_drm_crtc_update_config(mtk_crtc, false);
}

static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
				       struct drm_atomic_state *state)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
	int ret;

	DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);

	ret = pm_runtime_resume_and_get(comp->dev);
	if (ret < 0) {
		DRM_DEV_ERROR(comp->dev, "Failed to enable power domain: %d\n", ret);
		return;
	}

	mtk_drm_crtc_update_output(crtc, state);

	ret = mtk_crtc_ddp_hw_init(mtk_crtc);
	if (ret) {
		pm_runtime_put(comp->dev);
		return;
	}

	drm_crtc_vblank_on(crtc);
	mtk_crtc->enabled = true;
}

static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
					struct drm_atomic_state *state)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
	int i, ret;

	DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
	if (!mtk_crtc->enabled)
		return;

	/* Set all pending plane state to disabled */
	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		struct drm_plane *plane = &mtk_crtc->planes[i];
		struct mtk_plane_state *plane_state;

		plane_state = to_mtk_plane_state(plane->state);
		plane_state->pending.enable = false;
		plane_state->pending.config = true;
	}
	mtk_crtc->pending_planes = true;

	mtk_drm_crtc_update_config(mtk_crtc, false);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	/* Wait for planes to be disabled by cmdq */
	if (mtk_crtc->cmdq_client.chan)
		wait_event_timeout(mtk_crtc->cb_blocking_queue,
				   mtk_crtc->cmdq_vblank_cnt == 0,
				   msecs_to_jiffies(500));
#endif
	/* Wait for planes to be disabled */
	drm_crtc_wait_one_vblank(crtc);

	drm_crtc_vblank_off(crtc);
	mtk_crtc_ddp_hw_fini(mtk_crtc);
	ret = pm_runtime_put(comp->dev);
	if (ret < 0)
		DRM_DEV_ERROR(comp->dev, "Failed to disable power domain: %d\n", ret);

	mtk_crtc->enabled = false;
}
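
/*
 * atomic_begin takes over the pending vblank event and grabs a vblank
 * reference so the event can be completed from the vblank handler;
 * atomic_flush then applies any color management change and flushes the
 * new state via mtk_drm_crtc_update_config().
 */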
static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc,
				      struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct mtk_crtc_state *mtk_crtc_state = to_mtk_crtc_state(crtc_state);
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);

	if (mtk_crtc->event && mtk_crtc_state->base.event)
		DRM_ERROR("new event while there is still a pending event\n");

	if (mtk_crtc_state->base.event) {
		mtk_crtc_state->base.event->pipe = drm_crtc_index(crtc);
		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
		mtk_crtc->event = mtk_crtc_state->base.event;
		mtk_crtc_state->base.event = NULL;
	}
}

static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
				      struct drm_atomic_state *state)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	int i;

	if (crtc->state->color_mgmt_changed)
		for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
			mtk_ddp_gamma_set(mtk_crtc->ddp_comp[i], crtc->state);
			mtk_ddp_ctm_set(mtk_crtc->ddp_comp[i], crtc->state);
		}
	mtk_drm_crtc_update_config(mtk_crtc, !!mtk_crtc->event);
}

static const struct drm_crtc_funcs mtk_crtc_funcs = {
	.set_config		= drm_atomic_helper_set_config,
	.page_flip		= drm_atomic_helper_page_flip,
	.destroy		= mtk_drm_crtc_destroy,
	.reset			= mtk_drm_crtc_reset,
	.atomic_duplicate_state	= mtk_drm_crtc_duplicate_state,
	.atomic_destroy_state	= mtk_drm_crtc_destroy_state,
	.enable_vblank		= mtk_drm_crtc_enable_vblank,
	.disable_vblank		= mtk_drm_crtc_disable_vblank,
};

static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = {
	.mode_fixup	= mtk_drm_crtc_mode_fixup,
	.mode_set_nofb	= mtk_drm_crtc_mode_set_nofb,
	.atomic_begin	= mtk_drm_crtc_atomic_begin,
	.atomic_flush	= mtk_drm_crtc_atomic_flush,
	.atomic_enable	= mtk_drm_crtc_atomic_enable,
	.atomic_disable	= mtk_drm_crtc_atomic_disable,
};

static int mtk_drm_crtc_init(struct drm_device *drm,
			     struct mtk_drm_crtc *mtk_crtc,
			     unsigned int pipe)
{
	struct drm_plane *primary = NULL;
	struct drm_plane *cursor = NULL;
	int i, ret;

	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_PRIMARY)
			primary = &mtk_crtc->planes[i];
		else if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_CURSOR)
			cursor = &mtk_crtc->planes[i];
	}

	ret = drm_crtc_init_with_planes(drm, &mtk_crtc->base, primary, cursor,
					&mtk_crtc_funcs, NULL);
	if (ret)
		goto err_cleanup_crtc;

	drm_crtc_helper_add(&mtk_crtc->base, &mtk_crtc_helper_funcs);

	return 0;

err_cleanup_crtc:
	drm_crtc_cleanup(&mtk_crtc->base);
	return ret;
}

static int mtk_drm_crtc_num_comp_planes(struct mtk_drm_crtc *mtk_crtc,
					int comp_idx)
{
	struct mtk_ddp_comp *comp;

	if (comp_idx > 1)
		return 0;

	comp = mtk_crtc->ddp_comp[comp_idx];
	if (!comp->funcs)
		return 0;

	if (comp_idx == 1 && !comp->funcs->bgclr_in_on)
		return 0;

	return mtk_ddp_comp_layer_nr(comp);
}

static inline
enum drm_plane_type mtk_drm_crtc_plane_type(unsigned int plane_idx,
					    unsigned int num_planes)
{
	if (plane_idx == 0)
		return DRM_PLANE_TYPE_PRIMARY;
	else if (plane_idx == (num_planes - 1))
		return DRM_PLANE_TYPE_CURSOR;
	else
		return DRM_PLANE_TYPE_OVERLAY;
}

static int mtk_drm_crtc_init_comp_planes(struct drm_device *drm_dev,
					 struct mtk_drm_crtc *mtk_crtc,
					 int comp_idx, int pipe)
{
	int num_planes = mtk_drm_crtc_num_comp_planes(mtk_crtc, comp_idx);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[comp_idx];
	int i, ret;

	for (i = 0; i < num_planes; i++) {
		ret = mtk_plane_init(drm_dev,
				     &mtk_crtc->planes[mtk_crtc->layer_nr],
				     BIT(pipe),
				     mtk_drm_crtc_plane_type(mtk_crtc->layer_nr,
							     num_planes),
				     mtk_ddp_comp_supported_rotations(comp),
				     mtk_ddp_comp_get_formats(comp),
				     mtk_ddp_comp_get_num_formats(comp));
		if (ret)
			return ret;

		mtk_crtc->layer_nr++;
	}
	return 0;
}
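
/*
 * Return the device that represents this CRTC's path for DMA purposes
 * (by default the first component's device; see the dma_dev comment in
 * mtk_drm_crtc_create() below).
 */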
struct device *mtk_drm_crtc_dma_dev_get(struct drm_crtc *crtc)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);

	return mtk_crtc->dma_dev;
}
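
/*
 * Create one CRTC for the DDP path described by @path: verify that every
 * component in the path has probed, reserve a disp_mutex stream, create
 * the planes contributed by the components, set up color management and,
 * when CMDQ is reachable, request a GCE mailbox channel and a command
 * packet for flushing updates.
 */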
int mtk_drm_crtc_create(struct drm_device *drm_dev,
			const unsigned int *path, unsigned int path_len,
			int priv_data_index, const struct mtk_drm_route *conn_routes,
			unsigned int num_conn_routes)
{
	struct mtk_drm_private *priv = drm_dev->dev_private;
	struct device *dev = drm_dev->dev;
	struct mtk_drm_crtc *mtk_crtc;
	unsigned int num_comp_planes = 0;
	int ret;
	int i;
	bool has_ctm = false;
	uint gamma_lut_size = 0;
	struct drm_crtc *tmp;
	int crtc_i = 0;

	if (!path)
		return 0;

	priv = priv->all_drm_private[priv_data_index];

	drm_for_each_crtc(tmp, drm_dev)
		crtc_i++;

	for (i = 0; i < path_len; i++) {
		enum mtk_ddp_comp_id comp_id = path[i];
		struct device_node *node;
		struct mtk_ddp_comp *comp;

		node = priv->comp_node[comp_id];
		comp = &priv->ddp_comp[comp_id];

		/* Not all drm components have a DTS device node, such as ovl_adaptor,
		 * which is the drm bring up sub driver
		 */
		if (!node && comp_id != DDP_COMPONENT_DRM_OVL_ADAPTOR) {
			dev_info(dev,
				 "Not creating crtc %d because component %d is disabled or missing\n",
				 crtc_i, comp_id);
			return 0;
		}

		if (!comp->dev) {
			dev_err(dev, "Component %pOF not initialized\n", node);
			return -ENODEV;
		}
	}

	mtk_crtc = devm_kzalloc(dev, sizeof(*mtk_crtc), GFP_KERNEL);
	if (!mtk_crtc)
		return -ENOMEM;

	mtk_crtc->mmsys_dev = priv->mmsys_dev;
	mtk_crtc->ddp_comp_nr = path_len;
	mtk_crtc->ddp_comp = devm_kmalloc_array(dev,
						mtk_crtc->ddp_comp_nr + (conn_routes ? 1 : 0),
						sizeof(*mtk_crtc->ddp_comp),
						GFP_KERNEL);
	if (!mtk_crtc->ddp_comp)
		return -ENOMEM;

	mtk_crtc->mutex = mtk_mutex_get(priv->mutex_dev);
	if (IS_ERR(mtk_crtc->mutex)) {
		ret = PTR_ERR(mtk_crtc->mutex);
		dev_err(dev, "Failed to get mutex: %d\n", ret);
		return ret;
	}

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		unsigned int comp_id = path[i];
		struct mtk_ddp_comp *comp;

		comp = &priv->ddp_comp[comp_id];
		mtk_crtc->ddp_comp[i] = comp;

		if (comp->funcs) {
			if (comp->funcs->gamma_set && comp->funcs->gamma_get_lut_size) {
				unsigned int lut_sz = mtk_ddp_gamma_get_lut_size(comp);

				if (lut_sz)
					gamma_lut_size = lut_sz;
			}

			if (comp->funcs->ctm_set)
				has_ctm = true;
		}

		mtk_ddp_comp_register_vblank_cb(comp, mtk_crtc_ddp_irq,
						&mtk_crtc->base);
	}

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
		num_comp_planes += mtk_drm_crtc_num_comp_planes(mtk_crtc, i);

	mtk_crtc->planes = devm_kcalloc(dev, num_comp_planes,
					sizeof(struct drm_plane), GFP_KERNEL);
	if (!mtk_crtc->planes)
		return -ENOMEM;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		ret = mtk_drm_crtc_init_comp_planes(drm_dev, mtk_crtc, i,
						    crtc_i);
		if (ret)
			return ret;
	}

	/*
	 * Default to use the first component as the dma dev.
	 * In the case of ovl_adaptor sub driver, it needs to use the
	 * dma_dev_get function to get representative dma dev.
	 */
	mtk_crtc->dma_dev = mtk_ddp_comp_dma_dev_get(&priv->ddp_comp[path[0]]);

	ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, crtc_i);
	if (ret < 0)
		return ret;

	if (gamma_lut_size)
		drm_mode_crtc_set_gamma_size(&mtk_crtc->base, gamma_lut_size);
	drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, has_ctm, gamma_lut_size);
	mutex_init(&mtk_crtc->hw_lock);

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	i = priv->mbox_index++;
	mtk_crtc->cmdq_client.client.dev = mtk_crtc->mmsys_dev;
	mtk_crtc->cmdq_client.client.tx_block = false;
	mtk_crtc->cmdq_client.client.knows_txdone = true;
	mtk_crtc->cmdq_client.client.rx_callback = ddp_cmdq_cb;
	mtk_crtc->cmdq_client.chan =
			mbox_request_channel(&mtk_crtc->cmdq_client.client, i);
	if (IS_ERR(mtk_crtc->cmdq_client.chan)) {
		dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
			drm_crtc_index(&mtk_crtc->base));
		mtk_crtc->cmdq_client.chan = NULL;
	}

	if (mtk_crtc->cmdq_client.chan) {
		ret = of_property_read_u32_index(priv->mutex_node,
						 "mediatek,gce-events",
						 i,
						 &mtk_crtc->cmdq_event);
		if (ret) {
			dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
				drm_crtc_index(&mtk_crtc->base));
			mbox_free_channel(mtk_crtc->cmdq_client.chan);
			mtk_crtc->cmdq_client.chan = NULL;
		} else {
			ret = mtk_drm_cmdq_pkt_create(&mtk_crtc->cmdq_client,
						      &mtk_crtc->cmdq_handle,
						      PAGE_SIZE);
			if (ret) {
				dev_dbg(dev, "mtk_crtc %d failed to create cmdq packet\n",
					drm_crtc_index(&mtk_crtc->base));
				mbox_free_channel(mtk_crtc->cmdq_client.chan);
				mtk_crtc->cmdq_client.chan = NULL;
			}
		}

		/* for sending blocking cmd in crtc disable */
		init_waitqueue_head(&mtk_crtc->cb_blocking_queue);
	}
#endif

	if (conn_routes) {
		for (i = 0; i < num_conn_routes; i++) {
			unsigned int comp_id = conn_routes[i].route_ddp;
			struct device_node *node = priv->comp_node[comp_id];
			struct mtk_ddp_comp *comp = &priv->ddp_comp[comp_id];

			if (!comp->dev) {
				dev_dbg(dev, "comp_id:%d, Component %pOF not initialized\n",
					comp_id, node);
				/* mark encoder_index to -1, if route comp device is not enabled */
				comp->encoder_index = -1;
				continue;
			}

			mtk_ddp_comp_encoder_index_set(&priv->ddp_comp[comp_id]);
		}

		mtk_crtc->num_conn_routes = num_conn_routes;
		mtk_crtc->conn_routes = conn_routes;

		/* increase ddp_comp_nr at the end of mtk_drm_crtc_create */
		mtk_crtc->ddp_comp_nr++;
	}

	return 0;
}