Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

media: platform: mtk-mdp3: add support for parallel pipe to improve FPS

In some chips, MDP3 can use two pipelines to process a single
frame in parallel.
To enable this feature, multiple CMDQ clients and packets need to
be configured at the same time.

Signed-off-by: Moudy Ho <moudy.ho@mediatek.com>
Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
Signed-off-by: Sebastian Fricke <sebastian.fricke@collabora.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@kernel.org>

Authored by Moudy Ho; committed by Mauro Carvalho Chehab.
9288eae4 0e9bd2fc

+208 -56
+8
drivers/media/platform/mediatek/mdp3/mdp_cfg_data.c
··· 1060 1060 [MDP_PIPE_VPP0_SOUT] = {MDP_PIPE_VPP0_SOUT, MDP_MM_SUBSYS_1, 5}, 1061 1061 }; 1062 1062 1063 + static const struct v4l2_rect mt8195_mdp_pp_criteria = { 1064 + .width = 1920, 1065 + .height = 1080, 1066 + }; 1067 + 1063 1068 const struct mtk_mdp_driver_data mt8183_mdp_driver_data = { 1064 1069 .mdp_plat_id = MT8183, 1065 1070 .mdp_con_res = 0x14001000, ··· 1079 1074 .def_limit = &mt8183_mdp_def_limit, 1080 1075 .pipe_info = mt8183_pipe_info, 1081 1076 .pipe_info_len = ARRAY_SIZE(mt8183_pipe_info), 1077 + .pp_used = MDP_PP_USED_1, 1082 1078 }; 1083 1079 1084 1080 const struct mtk_mdp_driver_data mt8195_mdp_driver_data = { ··· 1096 1090 .def_limit = &mt8195_mdp_def_limit, 1097 1091 .pipe_info = mt8195_pipe_info, 1098 1092 .pipe_info_len = ARRAY_SIZE(mt8195_pipe_info), 1093 + .pp_criteria = &mt8195_mdp_pp_criteria, 1094 + .pp_used = MDP_PP_USED_2, 1099 1095 }; 1100 1096 1101 1097 s32 mdp_cfg_get_id_inner(struct mdp_dev *mdp_dev, enum mtk_mdp_comp_id id)
+140 -48
drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
··· 55 55 return mdp_dev->mm_subsys[p->sub_id].mdp_mutex[p->mutex_id]; 56 56 } 57 57 58 + static u8 __get_pp_num(enum mdp_stream_type type) 59 + { 60 + switch (type) { 61 + case MDP_STREAM_TYPE_DUAL_BITBLT: 62 + return MDP_PP_USED_2; 63 + default: 64 + return MDP_PP_USED_1; 65 + } 66 + } 67 + 58 68 static enum mdp_pipe_id __get_pipe(const struct mdp_dev *mdp_dev, 59 69 enum mtk_mdp_comp_id id) 60 70 { ··· 100 90 } 101 91 102 92 return pipe_id; 93 + } 94 + 95 + static struct img_config *__get_config_offset(struct mdp_dev *mdp, 96 + struct mdp_cmdq_param *param, 97 + u8 pp_idx) 98 + { 99 + const int p_id = mdp->mdp_data->mdp_plat_id; 100 + struct device *dev = &mdp->pdev->dev; 101 + void *cfg_c, *cfg_n; 102 + long bound = mdp->vpu.config_size; 103 + 104 + if (pp_idx >= mdp->mdp_data->pp_used) 105 + goto err_param; 106 + 107 + if (CFG_CHECK(MT8183, p_id)) 108 + cfg_c = CFG_OFST(MT8183, param->config, pp_idx); 109 + else if (CFG_CHECK(MT8195, p_id)) 110 + cfg_c = CFG_OFST(MT8195, param->config, pp_idx); 111 + else 112 + goto err_param; 113 + 114 + if (CFG_CHECK(MT8183, p_id)) 115 + cfg_n = CFG_OFST(MT8183, param->config, pp_idx + 1); 116 + else if (CFG_CHECK(MT8195, p_id)) 117 + cfg_n = CFG_OFST(MT8195, param->config, pp_idx + 1); 118 + else 119 + goto err_param; 120 + 121 + if ((long)cfg_n - (long)mdp->vpu.config > bound) { 122 + dev_err(dev, "config offset %ld OOB %ld\n", (long)cfg_n, bound); 123 + cfg_c = ERR_PTR(-EFAULT); 124 + } 125 + 126 + return (struct img_config *)cfg_c; 127 + 128 + err_param: 129 + cfg_c = ERR_PTR(-EINVAL); 130 + return (struct img_config *)cfg_c; 103 131 } 104 132 105 133 static int mdp_path_subfrm_require(const struct mdp_path *path, ··· 524 476 mdp_comp_clocks_off(&mdp->pdev->dev, cmd->comps, 525 477 cmd->num_comps); 526 478 527 - atomic_dec(&mdp->job_count); 528 - wake_up(&mdp->callback_wq); 479 + if (atomic_dec_and_test(&mdp->job_count)) { 480 + if (cmd->mdp_ctx) 481 + mdp_m2m_job_finish(cmd->mdp_ctx); 482 + 483 + if (cmd->user_cmdq_cb) 
{ 484 + struct cmdq_cb_data user_cb_data; 485 + 486 + user_cb_data.sta = cmd->data->sta; 487 + user_cb_data.pkt = cmd->data->pkt; 488 + cmd->user_cmdq_cb(user_cb_data); 489 + } 490 + wake_up(&mdp->callback_wq); 491 + } 529 492 530 493 mdp_cmdq_pkt_destroy(&cmd->pkt); 531 494 kfree(cmd->comps); ··· 560 501 561 502 data = (struct cmdq_cb_data *)mssg; 562 503 cmd = container_of(data->pkt, struct mdp_cmdq_cmd, pkt); 504 + cmd->data = data; 563 505 mdp = cmd->mdp; 564 506 dev = &mdp->pdev->dev; 565 - 566 - if (cmd->mdp_ctx) 567 - mdp_m2m_job_finish(cmd->mdp_ctx); 568 - 569 - if (cmd->user_cmdq_cb) { 570 - struct cmdq_cb_data user_cb_data; 571 - 572 - user_cb_data.sta = data->sta; 573 - user_cb_data.pkt = data->pkt; 574 - cmd->user_cmdq_cb(user_cb_data); 575 - } 576 507 577 508 INIT_WORK(&cmd->auto_release_work, mdp_auto_release_work); 578 509 if (!queue_work(mdp->clock_wq, &cmd->auto_release_work)) { ··· 575 526 mdp_comp_clocks_off(&mdp->pdev->dev, cmd->comps, 576 527 cmd->num_comps); 577 528 578 - atomic_dec(&mdp->job_count); 579 - wake_up(&mdp->callback_wq); 529 + if (atomic_dec_and_test(&mdp->job_count)) 530 + wake_up(&mdp->callback_wq); 580 531 581 532 mdp_cmdq_pkt_destroy(&cmd->pkt); 582 533 kfree(cmd->comps); ··· 586 537 } 587 538 } 588 539 589 - int mdp_cmdq_send(struct mdp_dev *mdp, struct mdp_cmdq_param *param) 540 + static struct mdp_cmdq_cmd *mdp_cmdq_prepare(struct mdp_dev *mdp, 541 + struct mdp_cmdq_param *param, 542 + u8 pp_idx) 590 543 { 591 544 struct mdp_path *path = NULL; 592 545 struct mdp_cmdq_cmd *cmd = NULL; 593 546 struct mdp_comp *comps = NULL; 594 547 struct device *dev = &mdp->pdev->dev; 595 548 const int p_id = mdp->mdp_data->mdp_plat_id; 549 + struct img_config *config; 596 550 struct mtk_mutex *mutex = NULL; 597 551 enum mdp_pipe_id pipe_id; 598 - int i, ret; 599 - u32 num_comp = 0; 552 + int i, ret = -ECANCELED; 553 + u32 num_comp; 600 554 601 - atomic_inc(&mdp->job_count); 602 - if (atomic_read(&mdp->suspended)) { 603 - 
atomic_dec(&mdp->job_count); 604 - return -ECANCELED; 555 + config = __get_config_offset(mdp, param, pp_idx); 556 + if (IS_ERR(config)) { 557 + ret = PTR_ERR(config); 558 + goto err_uninit; 605 559 } 560 + 561 + if (CFG_CHECK(MT8183, p_id)) 562 + num_comp = CFG_GET(MT8183, config, num_components); 563 + else if (CFG_CHECK(MT8195, p_id)) 564 + num_comp = CFG_GET(MT8195, config, num_components); 565 + else 566 + goto err_uninit; 606 567 607 568 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 608 569 if (!cmd) { 609 570 ret = -ENOMEM; 610 - goto err_cancel_job; 571 + goto err_uninit; 611 572 } 612 573 613 - ret = mdp_cmdq_pkt_create(mdp->cmdq_clt, &cmd->pkt, SZ_16K); 574 + ret = mdp_cmdq_pkt_create(mdp->cmdq_clt[pp_idx], &cmd->pkt, SZ_16K); 614 575 if (ret) 615 576 goto err_free_cmd; 616 577 ··· 645 586 } 646 587 647 588 path->mdp_dev = mdp; 648 - path->config = param->config; 589 + path->config = config; 649 590 path->param = param->param; 650 591 for (i = 0; i < param->param->num_outputs; i++) { 651 592 path->bounds[i].left = 0; ··· 659 600 } 660 601 ret = mdp_path_ctx_init(mdp, path); 661 602 if (ret) { 662 - dev_err(dev, "mdp_path_ctx_init error\n"); 603 + dev_err(dev, "mdp_path_ctx_init error %d\n", pp_idx); 663 604 goto err_free_path; 664 605 } 665 606 ··· 667 608 mutex = __get_mutex(mdp, &mdp->mdp_data->pipe_info[pipe_id]); 668 609 ret = mtk_mutex_prepare(mutex); 669 610 if (ret) { 670 - dev_err(dev, "Fail to enable mutex clk\n"); 611 + dev_err(dev, "Fail to enable mutex %d clk\n", pp_idx); 671 612 goto err_free_path; 672 613 } 673 614 674 615 ret = mdp_path_config(mdp, cmd, path); 675 616 if (ret) { 676 - dev_err(dev, "mdp_path_config error\n"); 617 + dev_err(dev, "mdp_path_config error %d\n", pp_idx); 677 618 goto err_free_path; 678 619 } 679 620 cmdq_pkt_finalize(&cmd->pkt); ··· 692 633 sizeof(struct mdp_comp)); 693 634 } 694 635 695 - mdp->cmdq_clt->client.rx_callback = mdp_handle_cmdq_callback; 636 + mdp->cmdq_clt[pp_idx]->client.rx_callback = 
mdp_handle_cmdq_callback; 696 637 cmd->mdp = mdp; 697 638 cmd->user_cmdq_cb = param->cmdq_cb; 698 639 cmd->user_cb_data = param->cb_data; ··· 700 641 cmd->num_comps = num_comp; 701 642 cmd->mdp_ctx = param->mdp_ctx; 702 643 703 - ret = mdp_comp_clocks_on(&mdp->pdev->dev, cmd->comps, cmd->num_comps); 704 - if (ret) 705 - goto err_free_path; 706 - 707 - dma_sync_single_for_device(mdp->cmdq_clt->chan->mbox->dev, 708 - cmd->pkt.pa_base, cmd->pkt.cmd_buf_size, 709 - DMA_TO_DEVICE); 710 - ret = mbox_send_message(mdp->cmdq_clt->chan, &cmd->pkt); 711 - if (ret < 0) { 712 - dev_err(dev, "mbox send message fail %d!\n", ret); 713 - goto err_clock_off; 714 - } 715 - mbox_client_txdone(mdp->cmdq_clt->chan, 0); 716 - 717 644 kfree(path); 718 - return 0; 645 + return cmd; 719 646 720 - err_clock_off: 721 - mdp_comp_clocks_off(&mdp->pdev->dev, cmd->comps, 722 - cmd->num_comps); 723 647 err_free_path: 724 648 if (mutex) 725 649 mtk_mutex_unprepare(mutex); ··· 713 671 mdp_cmdq_pkt_destroy(&cmd->pkt); 714 672 err_free_cmd: 715 673 kfree(cmd); 674 + err_uninit: 675 + return ERR_PTR(ret); 676 + } 677 + 678 + int mdp_cmdq_send(struct mdp_dev *mdp, struct mdp_cmdq_param *param) 679 + { 680 + struct mdp_cmdq_cmd *cmd[MDP_PP_MAX] = {NULL}; 681 + struct device *dev = &mdp->pdev->dev; 682 + int i, ret; 683 + u8 pp_used = __get_pp_num(param->param->type); 684 + 685 + atomic_set(&mdp->job_count, pp_used); 686 + if (atomic_read(&mdp->suspended)) { 687 + atomic_set(&mdp->job_count, 0); 688 + return -ECANCELED; 689 + } 690 + 691 + for (i = 0; i < pp_used; i++) { 692 + cmd[i] = mdp_cmdq_prepare(mdp, param, i); 693 + if (IS_ERR_OR_NULL(cmd[i])) { 694 + ret = PTR_ERR(cmd[i]); 695 + goto err_cancel_job; 696 + } 697 + } 698 + 699 + for (i = 0; i < pp_used; i++) { 700 + ret = mdp_comp_clocks_on(&mdp->pdev->dev, cmd[i]->comps, cmd[i]->num_comps); 701 + if (ret) 702 + goto err_clock_off; 703 + } 704 + 705 + for (i = 0; i < pp_used; i++) { 706 + 
dma_sync_single_for_device(mdp->cmdq_clt[i]->chan->mbox->dev, 707 + cmd[i]->pkt.pa_base, cmd[i]->pkt.cmd_buf_size, 708 + DMA_TO_DEVICE); 709 + 710 + ret = mbox_send_message(mdp->cmdq_clt[i]->chan, &cmd[i]->pkt); 711 + if (ret < 0) { 712 + dev_err(dev, "mbox send message fail %d!\n", ret); 713 + i = pp_used; 714 + goto err_clock_off; 715 + } 716 + mbox_client_txdone(mdp->cmdq_clt[i]->chan, 0); 717 + } 718 + return 0; 719 + 720 + err_clock_off: 721 + while (--i >= 0) 722 + mdp_comp_clocks_off(&mdp->pdev->dev, cmd[i]->comps, 723 + cmd[i]->num_comps); 716 724 err_cancel_job: 717 - atomic_dec(&mdp->job_count); 725 + atomic_set(&mdp->job_count, 0); 718 726 719 727 return ret; 720 728 }
+1
drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.h
··· 29 29 struct cmdq_pkt pkt; 30 30 s32 *event; 31 31 struct mdp_dev *mdp; 32 + struct cmdq_cb_data *data; 32 33 void (*user_cmdq_cb)(struct cmdq_cb_data data); 33 34 void *user_cb_data; 34 35 struct mdp_comp *comps;
+12 -6
drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
··· 142 142 struct mdp_dev *mdp = (struct mdp_dev *)video_get_drvdata(vdev); 143 143 int i; 144 144 145 + for (i = 0; i < mdp->mdp_data->pp_used; i++) 146 + if (mdp->cmdq_clt[i]) 147 + cmdq_mbox_destroy(mdp->cmdq_clt[i]); 148 + 145 149 scp_put(mdp->scp); 146 150 147 151 destroy_workqueue(mdp->job_wq); ··· 303 299 mutex_init(&mdp->vpu_lock); 304 300 mutex_init(&mdp->m2m_lock); 305 301 306 - mdp->cmdq_clt = cmdq_mbox_create(dev, 0); 307 - if (IS_ERR(mdp->cmdq_clt)) { 308 - ret = PTR_ERR(mdp->cmdq_clt); 309 - goto err_put_scp; 302 + for (i = 0; i < mdp->mdp_data->pp_used; i++) { 303 + mdp->cmdq_clt[i] = cmdq_mbox_create(dev, i); 304 + if (IS_ERR(mdp->cmdq_clt[i])) { 305 + ret = PTR_ERR(mdp->cmdq_clt[i]); 306 + goto err_mbox_destroy; 307 + } 310 308 } 311 309 312 310 init_waitqueue_head(&mdp->callback_wq); ··· 337 331 err_unregister_device: 338 332 v4l2_device_unregister(&mdp->v4l2_dev); 339 333 err_mbox_destroy: 340 - cmdq_mbox_destroy(mdp->cmdq_clt); 341 - err_put_scp: 334 + while (--i >= 0) 335 + cmdq_mbox_destroy(mdp->cmdq_clt[i]); 342 336 scp_put(mdp->scp); 343 337 err_destroy_clock_wq: 344 338 destroy_workqueue(mdp->clock_wq);
+11 -1
drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.h
··· 77 77 MDP_PIPE_MAX 78 78 }; 79 79 80 + /* MDP parallel pipe control */ 81 + enum { 82 + MDP_PP_USED_1 = 1, 83 + MDP_PP_USED_2 = 2, 84 + }; 85 + 86 + #define MDP_PP_MAX MDP_PP_USED_2 87 + 80 88 struct mtk_mdp_driver_data { 81 89 const int mdp_plat_id; 82 90 const resource_size_t mdp_con_res; ··· 99 91 const struct mdp_limit *def_limit; 100 92 const struct mdp_pipe_info *pipe_info; 101 93 unsigned int pipe_info_len; 94 + const struct v4l2_rect *pp_criteria; 95 + const u8 pp_used; 102 96 }; 103 97 104 98 struct mdp_mm_subsys { ··· 125 115 s32 vpu_count; 126 116 u32 id_count; 127 117 struct ida mdp_ida; 128 - struct cmdq_client *cmdq_clt; 118 + struct cmdq_client *cmdq_clt[MDP_PP_MAX]; 129 119 wait_queue_head_t callback_wq; 130 120 131 121 struct v4l2_device v4l2_dev;
+15
drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
··· 87 87 dst_vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx); 88 88 mdp_set_dst_config(&param.outputs[0], frame, &dst_vb->vb2_buf); 89 89 90 + if (mdp_check_pp_enable(ctx->mdp_dev, frame)) 91 + param.type = MDP_STREAM_TYPE_DUAL_BITBLT; 92 + 90 93 ret = mdp_vpu_process(&ctx->mdp_dev->vpu, &param); 91 94 if (ret) { 92 95 dev_err(&ctx->mdp_dev->pdev->dev, ··· 103 100 task.cmdq_cb = NULL; 104 101 task.cb_data = NULL; 105 102 task.mdp_ctx = ctx; 103 + 104 + if (atomic_read(&ctx->mdp_dev->job_count)) { 105 + ret = wait_event_timeout(ctx->mdp_dev->callback_wq, 106 + !atomic_read(&ctx->mdp_dev->job_count), 107 + 2 * HZ); 108 + if (ret == 0) { 109 + dev_err(&ctx->mdp_dev->pdev->dev, 110 + "%d jobs not yet done\n", 111 + atomic_read(&ctx->mdp_dev->job_count)); 112 + goto worker_end; 113 + } 114 + } 106 115 107 116 ret = mdp_cmdq_send(ctx->mdp_dev, &task); 108 117 if (ret) {
+18
drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c
··· 304 304 return 0; 305 305 } 306 306 307 + bool mdp_check_pp_enable(struct mdp_dev *mdp, struct mdp_frame *frame) 308 + { 309 + u32 s, r1, r2; 310 + 311 + if (!mdp || !frame) 312 + return false; 313 + 314 + if (!mdp->mdp_data->pp_criteria) 315 + return false; 316 + 317 + s = mdp->mdp_data->pp_criteria->width * 318 + mdp->mdp_data->pp_criteria->height; 319 + r1 = frame->crop.c.width * frame->crop.c.height; 320 + r2 = frame->compose.width * frame->compose.height; 321 + 322 + return (r1 >= s || r2 >= s); 323 + } 324 + 307 325 /* Stride that is accepted by MDP HW */ 308 326 static u32 mdp_fmt_get_stride(const struct mdp_format *fmt, 309 327 u32 bytesperline, unsigned int plane)
+1
drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.h
··· 368 368 int mdp_check_scaling_ratio(const struct v4l2_rect *crop, 369 369 const struct v4l2_rect *compose, s32 rotation, 370 370 const struct mdp_limit *limit); 371 + bool mdp_check_pp_enable(struct mdp_dev *mdp, struct mdp_frame *frame); 371 372 void mdp_set_src_config(struct img_input *in, 372 373 struct mdp_frame *frame, struct vb2_buffer *vb); 373 374 void mdp_set_dst_config(struct img_output *out,
+2 -1
drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c
··· 198 198 }; 199 199 struct mdp_dev *mdp = vpu_to_mdp(vpu); 200 200 int err; 201 + u8 pp_num = mdp->mdp_data->pp_used; 201 202 202 203 init_completion(&vpu->ipi_acked); 203 204 vpu->scp = scp; ··· 212 211 mutex_lock(vpu->lock); 213 212 vpu->work_size = ALIGN(vpu->work_size, 64); 214 213 vpu->param_size = ALIGN(sizeof(struct img_ipi_frameparam), 64); 215 - vpu->config_size = ALIGN(sizeof(struct img_config), 64); 214 + vpu->config_size = ALIGN(sizeof(struct img_config) * pp_num, 64); 216 215 err = mdp_vpu_shared_mem_alloc(vpu); 217 216 mutex_unlock(vpu->lock); 218 217 if (err) {