Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-next-du-20170803' of git://linuxtv.org/pinchartl/media into drm-next

rcar-du updates; contains vsp1 updates as well.

* tag 'drm-next-du-20170803' of git://linuxtv.org/pinchartl/media: (24 commits)
drm: rcar-du: Use new iterator macros
drm: rcar-du: Repair vblank for DRM page flips using the VSP
drm: rcar-du: Fix race condition when disabling planes at CRTC stop
drm: rcar-du: Wait for flip completion instead of vblank in commit tail
drm: rcar-du: Use the VBK interrupt for vblank events
drm: rcar-du: Add HDMI outputs to R8A7796 device description
drm: rcar-du: Remove an unneeded NULL check
drm: rcar-du: Setup planes before enabling CRTC to avoid flicker
drm: rcar-du: Configure DPAD0 routing through last group on Gen3
drm: rcar-du: Restrict DPLL duty cycle workaround to H3 ES1.x
drm: rcar-du: Support multiple sources from the same VSP
drm: rcar-du: Fix comments to comply with the kernel coding style
drm: rcar-du: Use of_graph_get_remote_endpoint()
v4l: vsp1: Add support for header display lists in continuous mode
v4l: vsp1: Add support for multiple DRM pipelines
v4l: vsp1: Add support for multiple LIF instances
v4l: vsp1: Add support for new VSP2-BS, VSP2-DL and VSP2-D instances
v4l: vsp1: Add support for the BRS entity
v4l: vsp1: Add pipe index argument to the VSP-DU API
v4l: vsp1: Don't create links for DRM pipeline
...

+957 -549
+138 -51
drivers/gpu/drm/rcar-du/rcar_du_crtc.c
··· 13 13 14 14 #include <linux/clk.h> 15 15 #include <linux/mutex.h> 16 + #include <linux/sys_soc.h> 16 17 17 18 #include <drm/drmP.h> 18 19 #include <drm/drm_atomic.h> ··· 130 129 for (fdpll = 1; fdpll < 32; fdpll++) { 131 130 unsigned long output; 132 131 133 - /* 1/2 (FRQSEL=1) for duty rate 50% */ 134 132 output = input * (n + 1) / (m + 1) 135 - / (fdpll + 1) / 2; 136 - 133 + / (fdpll + 1); 137 134 if (output >= 400000000) 138 135 continue; 139 136 ··· 157 158 best_diff); 158 159 } 159 160 161 + static const struct soc_device_attribute rcar_du_r8a7795_es1[] = { 162 + { .soc_id = "r8a7795", .revision = "ES1.*" }, 163 + { /* sentinel */ } 164 + }; 165 + 160 166 static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) 161 167 { 162 168 const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode; ··· 172 168 u32 escr; 173 169 u32 div; 174 170 175 - /* Compute the clock divisor and select the internal or external dot 171 + /* 172 + * Compute the clock divisor and select the internal or external dot 176 173 * clock based on the requested frequency. 177 174 */ 178 175 clk = clk_get_rate(rcrtc->clock); ··· 190 185 191 186 extclk = clk_get_rate(rcrtc->extclock); 192 187 if (rcdu->info->dpll_ch & (1 << rcrtc->index)) { 193 - rcar_du_dpll_divider(rcrtc, &dpll, extclk, mode_clock); 188 + unsigned long target = mode_clock; 189 + 190 + /* 191 + * The H3 ES1.x exhibits dot clock duty cycle stability 192 + * issues. We can work around them by configuring the 193 + * DPLL to twice the desired frequency, coupled with a 194 + * /2 post-divider. This isn't needed on other SoCs and 195 + * breaks HDMI output on M3-W for a currently unknown 196 + * reason, so restrict the workaround to H3 ES1.x. 
197 + */ 198 + if (soc_device_match(rcar_du_r8a7795_es1)) 199 + target *= 2; 200 + 201 + rcar_du_dpll_divider(rcrtc, &dpll, extclk, target); 194 202 extclk = dpll.output; 195 203 } 196 204 ··· 215 197 216 198 if (abs((long)extrate - (long)mode_clock) < 217 199 abs((long)rate - (long)mode_clock)) { 218 - dev_dbg(rcrtc->group->dev->dev, 219 - "crtc%u: using external clock\n", rcrtc->index); 220 200 221 201 if (rcdu->info->dpll_ch & (1 << rcrtc->index)) { 222 202 u32 dpllcr = DPLLCR_CODE | DPLLCR_CLKE ··· 231 215 232 216 rcar_du_group_write(rcrtc->group, DPLLCR, 233 217 dpllcr); 234 - 235 - escr = ESCR_DCLKSEL_DCLKIN | 1; 236 - } else { 237 - escr = ESCR_DCLKSEL_DCLKIN | extdiv; 238 218 } 219 + 220 + escr = ESCR_DCLKSEL_DCLKIN | extdiv; 239 221 } 222 + 223 + dev_dbg(rcrtc->group->dev->dev, 224 + "mode clock %lu extrate %lu rate %lu ESCR 0x%08x\n", 225 + mode_clock, extrate, rate, escr); 240 226 } 241 227 242 228 rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? ESCR2 : ESCR, ··· 279 261 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); 280 262 struct rcar_du_device *rcdu = rcrtc->group->dev; 281 263 282 - /* Store the route from the CRTC output to the DU output. The DU will be 264 + /* 265 + * Store the route from the CRTC output to the DU output. The DU will be 283 266 * configured when starting the CRTC. 284 267 */ 285 268 rcrtc->outputs |= BIT(output); 286 269 287 - /* Store RGB routing to DPAD0, the hardware will be configured when 270 + /* 271 + * Store RGB routing to DPAD0, the hardware will be configured when 288 272 * starting the CRTC. 289 273 */ 290 274 if (output == RCAR_DU_OUTPUT_DPAD0) ··· 362 342 } 363 343 } 364 344 365 - /* Update the planes to display timing and dot clock generator 345 + /* 346 + * Update the planes to display timing and dot clock generator 366 347 * associations. 
367 348 * 368 349 * Updating the DPTSR register requires restarting the CRTC group, ··· 452 431 * Start/Stop and Suspend/Resume 453 432 */ 454 433 455 - static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc) 434 + static void rcar_du_crtc_setup(struct rcar_du_crtc *rcrtc) 456 435 { 457 - struct drm_crtc *crtc = &rcrtc->crtc; 458 - bool interlaced; 459 - 460 - if (rcrtc->started) 461 - return; 462 - 463 436 /* Set display off and background to black */ 464 437 rcar_du_crtc_write(rcrtc, DOOR, DOOR_RGB(0, 0, 0)); 465 438 rcar_du_crtc_write(rcrtc, BPOR, BPOR_RGB(0, 0, 0)); ··· 465 450 /* Start with all planes disabled. */ 466 451 rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0); 467 452 468 - /* Select master sync mode. This enables display operation in master 453 + /* Enable the VSP compositor. */ 454 + if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE)) 455 + rcar_du_vsp_enable(rcrtc); 456 + 457 + /* Turn vertical blanking interrupt reporting on. */ 458 + drm_crtc_vblank_on(&rcrtc->crtc); 459 + } 460 + 461 + static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc) 462 + { 463 + bool interlaced; 464 + 465 + /* 466 + * Select master sync mode. This enables display operation in master 469 467 * sync mode (with the HSYNC and VSYNC signals configured as outputs and 470 468 * actively driven). 471 469 */ ··· 488 460 DSYSR_TVM_MASTER); 489 461 490 462 rcar_du_group_start_stop(rcrtc->group, true); 463 + } 491 464 492 - /* Enable the VSP compositor. */ 493 - if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE)) 494 - rcar_du_vsp_enable(rcrtc); 465 + static void rcar_du_crtc_disable_planes(struct rcar_du_crtc *rcrtc) 466 + { 467 + struct rcar_du_device *rcdu = rcrtc->group->dev; 468 + struct drm_crtc *crtc = &rcrtc->crtc; 469 + u32 status; 495 470 496 - /* Turn vertical blanking interrupt reporting back on. */ 497 - drm_crtc_vblank_on(crtc); 471 + /* Make sure vblank interrupts are enabled. 
*/ 472 + drm_crtc_vblank_get(crtc); 498 473 499 - rcrtc->started = true; 474 + /* 475 + * Disable planes and calculate how many vertical blanking interrupts we 476 + * have to wait for. If a vertical blanking interrupt has been triggered 477 + * but not processed yet, we don't know whether it occurred before or 478 + * after the planes got disabled. We thus have to wait for two vblank 479 + * interrupts in that case. 480 + */ 481 + spin_lock_irq(&rcrtc->vblank_lock); 482 + rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0); 483 + status = rcar_du_crtc_read(rcrtc, DSSR); 484 + rcrtc->vblank_count = status & DSSR_VBK ? 2 : 1; 485 + spin_unlock_irq(&rcrtc->vblank_lock); 486 + 487 + if (!wait_event_timeout(rcrtc->vblank_wait, rcrtc->vblank_count == 0, 488 + msecs_to_jiffies(100))) 489 + dev_warn(rcdu->dev, "vertical blanking timeout\n"); 490 + 491 + drm_crtc_vblank_put(crtc); 500 492 } 501 493 502 494 static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc) 503 495 { 504 496 struct drm_crtc *crtc = &rcrtc->crtc; 505 497 506 - if (!rcrtc->started) 507 - return; 508 - 509 - /* Disable all planes and wait for the change to take effect. This is 510 - * required as the DSnPR registers are updated on vblank, and no vblank 511 - * will occur once the CRTC is stopped. Disabling planes when starting 512 - * the CRTC thus wouldn't be enough as it would start scanning out 513 - * immediately from old frame buffers until the next vblank. 498 + /* 499 + * Disable all planes and wait for the change to take effect. This is 500 + * required as the plane enable registers are updated on vblank, and no 501 + * vblank will occur once the CRTC is stopped. Disabling planes when 502 + * starting the CRTC thus wouldn't be enough as it would start scanning 503 + * out immediately from old frame buffers until the next vblank. 
514 504 * 515 505 * This increases the CRTC stop delay, especially when multiple CRTCs 516 506 * are stopped in one operation as we now wait for one vblank per CRTC. 517 507 * Whether this can be improved needs to be researched. 518 508 */ 519 - rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0); 520 - drm_crtc_wait_one_vblank(crtc); 509 + rcar_du_crtc_disable_planes(rcrtc); 521 510 522 - /* Disable vertical blanking interrupt reporting. We first need to wait 511 + /* 512 + * Disable vertical blanking interrupt reporting. We first need to wait 523 513 * for page flip completion before stopping the CRTC as userspace 524 514 * expects page flips to eventually complete. 525 515 */ ··· 548 502 if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE)) 549 503 rcar_du_vsp_disable(rcrtc); 550 504 551 - /* Select switch sync mode. This stops display operation and configures 505 + /* 506 + * Select switch sync mode. This stops display operation and configures 552 507 * the HSYNC and VSYNC signals as inputs. 553 508 */ 554 509 rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK, DSYSR_TVM_SWITCH); 555 510 556 511 rcar_du_group_start_stop(rcrtc->group, false); 557 - 558 - rcrtc->started = false; 559 512 } 560 513 561 514 void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc) ··· 574 529 return; 575 530 576 531 rcar_du_crtc_get(rcrtc); 577 - rcar_du_crtc_start(rcrtc); 532 + rcar_du_crtc_setup(rcrtc); 578 533 579 534 /* Commit the planes state. 
*/ 580 - if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE)) { 581 - rcar_du_vsp_enable(rcrtc); 582 - } else { 535 + if (!rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE)) { 583 536 for (i = 0; i < rcrtc->group->num_planes; ++i) { 584 537 struct rcar_du_plane *plane = &rcrtc->group->planes[i]; 585 538 ··· 589 546 } 590 547 591 548 rcar_du_crtc_update_planes(rcrtc); 549 + rcar_du_crtc_start(rcrtc); 592 550 } 593 551 594 552 /* ----------------------------------------------------------------------------- ··· 601 557 { 602 558 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); 603 559 604 - rcar_du_crtc_get(rcrtc); 560 + /* 561 + * If the CRTC has already been setup by the .atomic_begin() handler we 562 + * can skip the setup stage. 563 + */ 564 + if (!rcrtc->initialized) { 565 + rcar_du_crtc_get(rcrtc); 566 + rcar_du_crtc_setup(rcrtc); 567 + rcrtc->initialized = true; 568 + } 569 + 605 570 rcar_du_crtc_start(rcrtc); 606 571 } 607 572 ··· 629 576 } 630 577 spin_unlock_irq(&crtc->dev->event_lock); 631 578 579 + rcrtc->initialized = false; 632 580 rcrtc->outputs = 0; 633 581 } 634 582 ··· 637 583 struct drm_crtc_state *old_crtc_state) 638 584 { 639 585 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); 586 + 587 + WARN_ON(!crtc->state->enable); 588 + 589 + /* 590 + * If a mode set is in progress we can be called with the CRTC disabled. 591 + * We then need to first setup the CRTC in order to configure planes. 592 + * The .atomic_enable() handler will notice and skip the CRTC setup. 
593 + */ 594 + if (!rcrtc->initialized) { 595 + rcar_du_crtc_get(rcrtc); 596 + rcar_du_crtc_setup(rcrtc); 597 + rcrtc->initialized = true; 598 + } 640 599 641 600 if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE)) 642 601 rcar_du_vsp_atomic_begin(rcrtc); ··· 690 623 691 624 rcar_du_crtc_write(rcrtc, DSRCR, DSRCR_VBCL); 692 625 rcar_du_crtc_set(rcrtc, DIER, DIER_VBE); 626 + rcrtc->vblank_enable = true; 693 627 694 628 return 0; 695 629 } ··· 700 632 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); 701 633 702 634 rcar_du_crtc_clr(rcrtc, DIER, DIER_VBE); 635 + rcrtc->vblank_enable = false; 703 636 } 704 637 705 638 static const struct drm_crtc_funcs crtc_funcs = { ··· 725 656 irqreturn_t ret = IRQ_NONE; 726 657 u32 status; 727 658 659 + spin_lock(&rcrtc->vblank_lock); 660 + 728 661 status = rcar_du_crtc_read(rcrtc, DSSR); 729 662 rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK); 730 663 731 - if (status & DSSR_FRM) { 732 - drm_crtc_handle_vblank(&rcrtc->crtc); 664 + if (status & DSSR_VBK) { 665 + /* 666 + * Wake up the vblank wait if the counter reaches 0. This must 667 + * be protected by the vblank_lock to avoid races in 668 + * rcar_du_crtc_disable_planes(). 
669 + */ 670 + if (rcrtc->vblank_count) { 671 + if (--rcrtc->vblank_count == 0) 672 + wake_up(&rcrtc->vblank_wait); 673 + } 674 + } 733 675 734 - if (rcdu->info->gen < 3) 676 + spin_unlock(&rcrtc->vblank_lock); 677 + 678 + if (status & DSSR_VBK) { 679 + if (rcdu->info->gen < 3) { 680 + drm_crtc_handle_vblank(&rcrtc->crtc); 735 681 rcar_du_crtc_finish_page_flip(rcrtc); 682 + } 736 683 737 684 ret = IRQ_HANDLED; 738 685 } ··· 802 717 } 803 718 804 719 init_waitqueue_head(&rcrtc->flip_wait); 720 + init_waitqueue_head(&rcrtc->vblank_wait); 721 + spin_lock_init(&rcrtc->vblank_lock); 805 722 806 723 rcrtc->group = rgrp; 807 724 rcrtc->mmio_offset = mmio_offsets[index]; 808 725 rcrtc->index = index; 809 726 810 727 if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE)) 811 - primary = &rcrtc->vsp->planes[0].plane; 728 + primary = &rcrtc->vsp->planes[rcrtc->vsp_pipe].plane; 812 729 else 813 730 primary = &rgrp->planes[index % 2].plane; 814 731
+15 -2
drivers/gpu/drm/rcar-du/rcar_du_crtc.h
··· 15 15 #define __RCAR_DU_CRTC_H__ 16 16 17 17 #include <linux/mutex.h> 18 + #include <linux/spinlock.h> 18 19 #include <linux/wait.h> 19 20 20 21 #include <drm/drmP.h> ··· 31 30 * @extclock: external pixel dot clock (optional) 32 31 * @mmio_offset: offset of the CRTC registers in the DU MMIO block 33 32 * @index: CRTC software and hardware index 34 - * @started: whether the CRTC has been started and is running 33 + * @initialized: whether the CRTC has been initialized and clocks enabled 34 + * @vblank_enable: whether vblank events are enabled on this CRTC 35 35 * @event: event to post when the pending page flip completes 36 36 * @flip_wait: wait queue used to signal page flip completion 37 + * @vblank_lock: protects vblank_wait and vblank_count 38 + * @vblank_wait: wait queue used to signal vertical blanking 39 + * @vblank_count: number of vertical blanking interrupts to wait for 37 40 * @outputs: bitmask of the outputs (enum rcar_du_output) driven by this CRTC 38 41 * @group: CRTC group this CRTC belongs to 42 + * @vsp: VSP feeding video to this CRTC 43 + * @vsp_pipe: index of the VSP pipeline feeding video to this CRTC 39 44 */ 40 45 struct rcar_du_crtc { 41 46 struct drm_crtc crtc; ··· 50 43 struct clk *extclock; 51 44 unsigned int mmio_offset; 52 45 unsigned int index; 53 - bool started; 46 + bool initialized; 54 47 48 + bool vblank_enable; 55 49 struct drm_pending_vblank_event *event; 56 50 wait_queue_head_t flip_wait; 51 + 52 + spinlock_t vblank_lock; 53 + wait_queue_head_t vblank_wait; 54 + unsigned int vblank_count; 57 55 58 56 unsigned int outputs; 59 57 60 58 struct rcar_du_group *group; 61 59 struct rcar_du_vsp *vsp; 60 + unsigned int vsp_pipe; 62 61 }; 63 62 64 63 #define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc)
+20 -8
drivers/gpu/drm/rcar-du/rcar_du_drv.c
··· 39 39 .features = 0, 40 40 .num_crtcs = 2, 41 41 .routes = { 42 - /* R8A7779 has two RGB outputs and one (currently unsupported) 42 + /* 43 + * R8A7779 has two RGB outputs and one (currently unsupported) 43 44 * TCON output. 44 45 */ 45 46 [RCAR_DU_OUTPUT_DPAD0] = { ··· 62 61 .quirks = RCAR_DU_QUIRK_ALIGN_128B | RCAR_DU_QUIRK_LVDS_LANES, 63 62 .num_crtcs = 3, 64 63 .routes = { 65 - /* R8A7790 has one RGB output, two LVDS outputs and one 64 + /* 65 + * R8A7790 has one RGB output, two LVDS outputs and one 66 66 * (currently unsupported) TCON output. 67 67 */ 68 68 [RCAR_DU_OUTPUT_DPAD0] = { ··· 89 87 | RCAR_DU_FEATURE_EXT_CTRL_REGS, 90 88 .num_crtcs = 2, 91 89 .routes = { 92 - /* R8A779[13] has one RGB output, one LVDS output and one 90 + /* 91 + * R8A779[13] has one RGB output, one LVDS output and one 93 92 * (currently unsupported) TCON output. 94 93 */ 95 94 [RCAR_DU_OUTPUT_DPAD0] = { ··· 130 127 | RCAR_DU_FEATURE_EXT_CTRL_REGS, 131 128 .num_crtcs = 2, 132 129 .routes = { 133 - /* R8A7794 has two RGB outputs and one (currently unsupported) 130 + /* 131 + * R8A7794 has two RGB outputs and one (currently unsupported) 134 132 * TCON output. 135 133 */ 136 134 [RCAR_DU_OUTPUT_DPAD0] = { ··· 153 149 | RCAR_DU_FEATURE_VSP1_SOURCE, 154 150 .num_crtcs = 4, 155 151 .routes = { 156 - /* R8A7795 has one RGB output, two HDMI outputs and one 152 + /* 153 + * R8A7795 has one RGB output, two HDMI outputs and one 157 154 * LVDS output. 158 155 */ 159 156 [RCAR_DU_OUTPUT_DPAD0] = { ··· 185 180 | RCAR_DU_FEATURE_VSP1_SOURCE, 186 181 .num_crtcs = 3, 187 182 .routes = { 188 - /* R8A7796 has one RGB output, one LVDS output and one 189 - * (currently unsupported) HDMI output. 183 + /* 184 + * R8A7796 has one RGB output, one LVDS output and one HDMI 185 + * output. 
190 186 */ 191 187 [RCAR_DU_OUTPUT_DPAD0] = { 192 188 .possible_crtcs = BIT(2), 193 189 .port = 0, 190 + }, 191 + [RCAR_DU_OUTPUT_HDMI0] = { 192 + .possible_crtcs = BIT(1), 193 + .port = 1, 194 194 }, 195 195 [RCAR_DU_OUTPUT_LVDS0] = { 196 196 .possible_crtcs = BIT(0), ··· 203 193 }, 204 194 }, 205 195 .num_lvds = 1, 196 + .dpll_ch = BIT(1), 206 197 }; 207 198 208 199 static const struct of_device_id rcar_du_of_table[] = { ··· 352 341 353 342 ddev->irq_enabled = 1; 354 343 355 - /* Register the DRM device with the core and the connectors with 344 + /* 345 + * Register the DRM device with the core and the connectors with 356 346 * sysfs. 357 347 */ 358 348 ret = drm_dev_register(ddev, 0);
+26 -12
drivers/gpu/drm/rcar-du/rcar_du_group.c
··· 64 64 if (rcdu->info->gen < 3) { 65 65 defr8 |= DEFR8_DEFE8; 66 66 67 - /* On Gen2 the DEFR8 register for the first group also controls 67 + /* 68 + * On Gen2 the DEFR8 register for the first group also controls 68 69 * RGB output routing to DPAD0 and VSPD1 routing to DU0/1/2 for 69 70 * DU instances that support it. 70 71 */ ··· 76 75 defr8 |= DEFR8_VSCS; 77 76 } 78 77 } else { 79 - /* On Gen3 VSPD routing can't be configured, but DPAD routing 78 + /* 79 + * On Gen3 VSPD routing can't be configured, but DPAD routing 80 80 * needs to be set despite having a single option available. 81 81 */ 82 82 u32 crtc = ffs(possible_crtcs) - 1; ··· 126 124 if (rcdu->info->gen >= 3) 127 125 rcar_du_group_write(rgrp, DEFR10, DEFR10_CODE | DEFR10_DEFE10); 128 126 129 - /* Use DS1PR and DS2PR to configure planes priorities and connects the 127 + /* 128 + * Use DS1PR and DS2PR to configure planes priorities and connects the 130 129 * superposition 0 to DU0 pins. DU1 pins will be configured dynamically. 131 130 */ 132 131 rcar_du_group_write(rgrp, DORCR, DORCR_PG1D_DS1 | DORCR_DPRS); ··· 180 177 181 178 void rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start) 182 179 { 183 - /* Many of the configuration bits are only updated when the display 180 + /* 181 + * Many of the configuration bits are only updated when the display 184 182 * reset (DRES) bit in DSYSR is set to 1, disabling *both* CRTCs. 
Some 185 183 * of those bits could be pre-configured, but others (especially the 186 184 * bits related to plane assignment to display timing controllers) need ··· 212 208 213 209 int rcar_du_set_dpad0_vsp1_routing(struct rcar_du_device *rcdu) 214 210 { 211 + struct rcar_du_group *rgrp; 212 + struct rcar_du_crtc *crtc; 213 + unsigned int index; 215 214 int ret; 216 215 217 216 if (!rcar_du_has(rcdu, RCAR_DU_FEATURE_EXT_CTRL_REGS)) 218 217 return 0; 219 218 220 - /* RGB output routing to DPAD0 and VSP1D routing to DU0/1/2 are 221 - * configured in the DEFR8 register of the first group. As this function 222 - * can be called with the DU0 and DU1 CRTCs disabled, we need to enable 223 - * the first group clock before accessing the register. 219 + /* 220 + * RGB output routing to DPAD0 and VSP1D routing to DU0/1/2 are 221 + * configured in the DEFR8 register of the first group on Gen2 and the 222 + * last group on Gen3. As this function can be called with the DU 223 + * channels of the corresponding CRTCs disabled, we need to enable the 224 + * group clock before accessing the register. 224 225 */ 225 - ret = clk_prepare_enable(rcdu->crtcs[0].clock); 226 + index = rcdu->info->gen < 3 ? 0 : DIV_ROUND_UP(rcdu->num_crtcs, 2) - 1; 227 + rgrp = &rcdu->groups[index]; 228 + crtc = &rcdu->crtcs[index * 2]; 229 + 230 + ret = clk_prepare_enable(crtc->clock); 226 231 if (ret < 0) 227 232 return ret; 228 233 229 - rcar_du_group_setup_defr8(&rcdu->groups[0]); 234 + rcar_du_group_setup_defr8(rgrp); 230 235 231 - clk_disable_unprepare(rcdu->crtcs[0].clock); 236 + clk_disable_unprepare(crtc->clock); 232 237 233 238 return 0; 234 239 } ··· 249 236 250 237 dorcr &= ~(DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_MASK); 251 238 252 - /* Set the DPAD1 pins sources. Select CRTC 0 if explicitly requested and 239 + /* 240 + * Set the DPAD1 pins sources. Select CRTC 0 if explicitly requested and 253 241 * CRTC 1 in all other cases to avoid cloning CRTC 0 to DPAD0 and DPAD1 254 242 * by default. 
255 243 */
+93 -20
drivers/gpu/drm/rcar-du/rcar_du_kms.c
··· 96 96 .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC, 97 97 .edf = PnDDCR4_EDF_NONE, 98 98 }, 99 - /* The following formats are not supported on Gen2 and thus have no 99 + /* 100 + * The following formats are not supported on Gen2 and thus have no 100 101 * associated .pnmr or .edf settings. 101 102 */ 102 103 { ··· 154 153 unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); 155 154 unsigned int align; 156 155 157 - /* The R8A7779 DU requires a 16 pixels pitch alignment as documented, 156 + /* 157 + * The R8A7779 DU requires a 16 pixels pitch alignment as documented, 158 158 * but the R8A7790 DU seems to require a 128 bytes pitch alignment. 159 159 */ 160 160 if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B)) ··· 257 255 258 256 /* Apply the atomic update. */ 259 257 drm_atomic_helper_commit_modeset_disables(dev, old_state); 260 - drm_atomic_helper_commit_modeset_enables(dev, old_state); 261 258 drm_atomic_helper_commit_planes(dev, old_state, 262 259 DRM_PLANE_COMMIT_ACTIVE_ONLY); 260 + drm_atomic_helper_commit_modeset_enables(dev, old_state); 263 261 264 262 drm_atomic_helper_commit_hw_done(old_state); 265 - drm_atomic_helper_wait_for_vblanks(dev, old_state); 263 + drm_atomic_helper_wait_for_flip_done(dev, old_state); 266 264 267 265 drm_atomic_helper_cleanup_planes(dev, old_state); 268 266 } ··· 311 309 return -ENODEV; 312 310 } 313 311 314 - entity_ep_node = of_parse_phandle(ep->local_node, "remote-endpoint", 0); 312 + entity_ep_node = of_graph_get_remote_endpoint(ep->local_node); 315 313 316 314 for_each_endpoint_of_node(entity, ep_node) { 317 315 if (ep_node == entity_ep_node) ··· 421 419 if (rcdu->props.alpha == NULL) 422 420 return -ENOMEM; 423 421 424 - /* The color key is expressed as an RGB888 triplet stored in a 32-bit 422 + /* 423 + * The color key is expressed as an RGB888 triplet stored in a 32-bit 425 424 * integer in XRGB8888 format. Bit 24 is used as a flag to disable (0) 426 425 * or enable source color keying (1). 
427 426 */ ··· 433 430 return -ENOMEM; 434 431 435 432 return 0; 433 + } 434 + 435 + static int rcar_du_vsps_init(struct rcar_du_device *rcdu) 436 + { 437 + const struct device_node *np = rcdu->dev->of_node; 438 + struct of_phandle_args args; 439 + struct { 440 + struct device_node *np; 441 + unsigned int crtcs_mask; 442 + } vsps[RCAR_DU_MAX_VSPS] = { { 0, }, }; 443 + unsigned int vsps_count = 0; 444 + unsigned int cells; 445 + unsigned int i; 446 + int ret; 447 + 448 + /* 449 + * First parse the DT vsps property to populate the list of VSPs. Each 450 + * entry contains a pointer to the VSP DT node and a bitmask of the 451 + * connected DU CRTCs. 452 + */ 453 + cells = of_property_count_u32_elems(np, "vsps") / rcdu->num_crtcs - 1; 454 + if (cells > 1) 455 + return -EINVAL; 456 + 457 + for (i = 0; i < rcdu->num_crtcs; ++i) { 458 + unsigned int j; 459 + 460 + ret = of_parse_phandle_with_fixed_args(np, "vsps", cells, i, 461 + &args); 462 + if (ret < 0) 463 + goto error; 464 + 465 + /* 466 + * Add the VSP to the list or update the corresponding existing 467 + * entry if the VSP has already been added. 468 + */ 469 + for (j = 0; j < vsps_count; ++j) { 470 + if (vsps[j].np == args.np) 471 + break; 472 + } 473 + 474 + if (j < vsps_count) 475 + of_node_put(args.np); 476 + else 477 + vsps[vsps_count++].np = args.np; 478 + 479 + vsps[j].crtcs_mask |= BIT(i); 480 + 481 + /* Store the VSP pointer and pipe index in the CRTC. */ 482 + rcdu->crtcs[i].vsp = &rcdu->vsps[j]; 483 + rcdu->crtcs[i].vsp_pipe = cells >= 1 ? args.args[0] : 0; 484 + } 485 + 486 + /* 487 + * Then initialize all the VSPs from the node pointers and CRTCs bitmask 488 + * computed previously. 
489 + */ 490 + for (i = 0; i < vsps_count; ++i) { 491 + struct rcar_du_vsp *vsp = &rcdu->vsps[i]; 492 + 493 + vsp->index = i; 494 + vsp->dev = rcdu; 495 + 496 + ret = rcar_du_vsp_init(vsp, vsps[i].np, vsps[i].crtcs_mask); 497 + if (ret < 0) 498 + goto error; 499 + } 500 + 501 + return 0; 502 + 503 + error: 504 + for (i = 0; i < ARRAY_SIZE(vsps); ++i) 505 + of_node_put(vsps[i].np); 506 + 507 + return ret; 436 508 } 437 509 438 510 int rcar_du_modeset_init(struct rcar_du_device *rcdu) ··· 539 461 if (ret < 0) 540 462 return ret; 541 463 542 - /* Initialize vertical blanking interrupts handling. Start with vblank 464 + /* 465 + * Initialize vertical blanking interrupts handling. Start with vblank 543 466 * disabled for all CRTCs. 544 467 */ 545 468 ret = drm_vblank_init(dev, (1 << rcdu->info->num_crtcs) - 1); ··· 560 481 rgrp->index = i; 561 482 rgrp->num_crtcs = min(rcdu->num_crtcs - 2 * i, 2U); 562 483 563 - /* If we have more than one CRTCs in this group pre-associate 484 + /* 485 + * If we have more than one CRTCs in this group pre-associate 564 486 * the low-order planes with CRTC 0 and the high-order planes 565 487 * with CRTC 1 to minimize flicker occurring when the 566 488 * association is changed. ··· 579 499 580 500 /* Initialize the compositors. */ 581 501 if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE)) { 582 - for (i = 0; i < rcdu->num_crtcs; ++i) { 583 - struct rcar_du_vsp *vsp = &rcdu->vsps[i]; 584 - 585 - vsp->index = i; 586 - vsp->dev = rcdu; 587 - rcdu->crtcs[i].vsp = vsp; 588 - 589 - ret = rcar_du_vsp_init(vsp); 590 - if (ret < 0) 591 - return ret; 592 - } 502 + ret = rcar_du_vsps_init(rcdu); 503 + if (ret < 0) 504 + return ret; 593 505 } 594 506 595 507 /* Create the CRTCs. */ ··· 609 537 610 538 num_encoders = ret; 611 539 612 - /* Set the possible CRTCs and possible clones. There's always at least 540 + /* 541 + * Set the possible CRTCs and possible clones. 
There's always at least 613 542 * one way for all encoders to clone each other, set all bits in the 614 543 * possible clones field. 615 544 */
+8 -4
drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
··· 59 59 60 60 rcar_lvds_write(lvds, LVDPLLCR, pllcr); 61 61 62 - /* Select the input, hardcode mode 0, enable LVDS operation and turn 62 + /* 63 + * Select the input, hardcode mode 0, enable LVDS operation and turn 63 64 * bias circuitry on. 64 65 */ 65 66 lvdcr0 = (lvds->mode << LVDCR0_LVMD_SHIFT) | LVDCR0_BEN | LVDCR0_LVEN; ··· 74 73 LVDCR1_CHSTBY_GEN2(1) | LVDCR1_CHSTBY_GEN2(0) | 75 74 LVDCR1_CLKSTBY_GEN2); 76 75 77 - /* Turn the PLL on, wait for the startup delay, and turn the output 76 + /* 77 + * Turn the PLL on, wait for the startup delay, and turn the output 78 78 * on. 79 79 */ 80 80 lvdcr0 |= LVDCR0_PLLON; ··· 142 140 if (ret < 0) 143 141 return ret; 144 142 145 - /* Hardcode the channels and control signals routing for now. 143 + /* 144 + * Hardcode the channels and control signals routing for now. 146 145 * 147 146 * HSYNC -> CTRL0 148 147 * VSYNC -> CTRL1 ··· 205 202 { 206 203 struct rcar_du_device *rcdu = lvds->dev; 207 204 208 - /* The internal LVDS encoder has a restricted clock frequency operating 205 + /* 206 + * The internal LVDS encoder has a restricted clock frequency operating 209 207 * range (30MHz to 150MHz on Gen2, 25.175MHz to 148.5MHz on Gen3). Clamp 210 208 * the clock accordingly. 211 209 */
+66 -48
drivers/gpu/drm/rcar-du/rcar_du_plane.c
··· 50 50 * automatically when the core swaps the old and new states. 51 51 */ 52 52 53 - static bool rcar_du_plane_needs_realloc(struct rcar_du_plane *plane, 54 - struct rcar_du_plane_state *new_state) 53 + static bool rcar_du_plane_needs_realloc( 54 + const struct rcar_du_plane_state *old_state, 55 + const struct rcar_du_plane_state *new_state) 55 56 { 56 - struct rcar_du_plane_state *cur_state; 57 - 58 - cur_state = to_rcar_plane_state(plane->plane.state); 59 - 60 - /* Lowering the number of planes doesn't strictly require reallocation 57 + /* 58 + * Lowering the number of planes doesn't strictly require reallocation 61 59 * as the extra hardware plane will be freed when committing, but doing 62 60 * so could lead to more fragmentation. 63 61 */ 64 - if (!cur_state->format || 65 - cur_state->format->planes != new_state->format->planes) 62 + if (!old_state->format || 63 + old_state->format->planes != new_state->format->planes) 66 64 return true; 67 65 68 66 /* Reallocate hardware planes if the source has changed. */ 69 - if (cur_state->source != new_state->source) 67 + if (old_state->source != new_state->source) 70 68 return true; 71 69 72 70 return false; ··· 139 141 unsigned int groups = 0; 140 142 unsigned int i; 141 143 struct drm_plane *drm_plane; 142 - struct drm_plane_state *drm_plane_state; 144 + struct drm_plane_state *old_drm_plane_state; 145 + struct drm_plane_state *new_drm_plane_state; 143 146 144 147 /* Check if hardware planes need to be reallocated. 
*/ 145 - for_each_plane_in_state(state, drm_plane, drm_plane_state, i) { 146 - struct rcar_du_plane_state *plane_state; 148 + for_each_oldnew_plane_in_state(state, drm_plane, old_drm_plane_state, 149 + new_drm_plane_state, i) { 150 + struct rcar_du_plane_state *old_plane_state; 151 + struct rcar_du_plane_state *new_plane_state; 147 152 struct rcar_du_plane *plane; 148 153 unsigned int index; 149 154 150 155 plane = to_rcar_plane(drm_plane); 151 - plane_state = to_rcar_plane_state(drm_plane_state); 156 + old_plane_state = to_rcar_plane_state(old_drm_plane_state); 157 + new_plane_state = to_rcar_plane_state(new_drm_plane_state); 152 158 153 159 dev_dbg(rcdu->dev, "%s: checking plane (%u,%tu)\n", __func__, 154 160 plane->group->index, plane - plane->group->planes); 155 161 156 - /* If the plane is being disabled we don't need to go through 162 + /* 163 + * If the plane is being disabled we don't need to go through 157 164 * the full reallocation procedure. Just mark the hardware 158 165 * plane(s) as freed. 159 166 */ 160 - if (!plane_state->format) { 167 + if (!new_plane_state->format) { 161 168 dev_dbg(rcdu->dev, "%s: plane is being disabled\n", 162 169 __func__); 163 170 index = plane - plane->group->planes; 164 171 group_freed_planes[plane->group->index] |= 1 << index; 165 - plane_state->hwindex = -1; 172 + new_plane_state->hwindex = -1; 166 173 continue; 167 174 } 168 175 169 - /* If the plane needs to be reallocated mark it as such, and 176 + /* 177 + * If the plane needs to be reallocated mark it as such, and 170 178 * mark the hardware plane(s) as free. 
171 179 */ 172 - if (rcar_du_plane_needs_realloc(plane, plane_state)) { 180 + if (rcar_du_plane_needs_realloc(old_plane_state, new_plane_state)) { 173 181 dev_dbg(rcdu->dev, "%s: plane needs reallocation\n", 174 182 __func__); 175 183 groups |= 1 << plane->group->index; ··· 183 179 184 180 index = plane - plane->group->planes; 185 181 group_freed_planes[plane->group->index] |= 1 << index; 186 - plane_state->hwindex = -1; 182 + new_plane_state->hwindex = -1; 187 183 } 188 184 } 189 185 190 186 if (!needs_realloc) 191 187 return 0; 192 188 193 - /* Grab all plane states for the groups that need reallocation to ensure 189 + /* 190 + * Grab all plane states for the groups that need reallocation to ensure 194 191 * locking and avoid racy updates. This serializes the update operation, 195 192 * but there's not much we can do about it as that's the hardware 196 193 * design. ··· 209 204 210 205 for (i = 0; i < group->num_planes; ++i) { 211 206 struct rcar_du_plane *plane = &group->planes[i]; 212 - struct rcar_du_plane_state *plane_state; 207 + struct rcar_du_plane_state *new_plane_state; 213 208 struct drm_plane_state *s; 214 209 215 210 s = drm_atomic_get_plane_state(state, &plane->plane); 216 211 if (IS_ERR(s)) 217 212 return PTR_ERR(s); 218 213 219 - /* If the plane has been freed in the above loop its 214 + /* 215 + * If the plane has been freed in the above loop its 220 216 * hardware planes must not be added to the used planes 221 217 * bitmask. 
However, the current state doesn't reflect 222 218 * the free state yet, as we've modified the new state ··· 232 226 continue; 233 227 } 234 228 235 - plane_state = to_rcar_plane_state(plane->plane.state); 236 - used_planes |= rcar_du_plane_hwmask(plane_state); 229 + new_plane_state = to_rcar_plane_state(s); 230 + used_planes |= rcar_du_plane_hwmask(new_plane_state); 237 231 238 232 dev_dbg(rcdu->dev, 239 233 "%s: plane (%u,%tu) uses %u hwplanes (index %d)\n", 240 234 __func__, plane->group->index, 241 235 plane - plane->group->planes, 242 - plane_state->format ? 243 - plane_state->format->planes : 0, 244 - plane_state->hwindex); 236 + new_plane_state->format ? 237 + new_plane_state->format->planes : 0, 238 + new_plane_state->hwindex); 245 239 } 246 240 247 241 group_free_planes[index] = 0xff & ~used_planes; ··· 252 246 } 253 247 254 248 /* Reallocate hardware planes for each plane that needs it. */ 255 - for_each_plane_in_state(state, drm_plane, drm_plane_state, i) { 256 - struct rcar_du_plane_state *plane_state; 249 + for_each_oldnew_plane_in_state(state, drm_plane, old_drm_plane_state, 250 + new_drm_plane_state, i) { 251 + struct rcar_du_plane_state *old_plane_state; 252 + struct rcar_du_plane_state *new_plane_state; 257 253 struct rcar_du_plane *plane; 258 254 unsigned int crtc_planes; 259 255 unsigned int free; 260 256 int idx; 261 257 262 258 plane = to_rcar_plane(drm_plane); 263 - plane_state = to_rcar_plane_state(drm_plane_state); 259 + old_plane_state = to_rcar_plane_state(old_drm_plane_state); 260 + new_plane_state = to_rcar_plane_state(new_drm_plane_state); 264 261 265 262 dev_dbg(rcdu->dev, "%s: allocating plane (%u,%tu)\n", __func__, 266 263 plane->group->index, plane - plane->group->planes); 267 264 268 - /* Skip planes that are being disabled or don't need to be 265 + /* 266 + * Skip planes that are being disabled or don't need to be 269 267 * reallocated. 
270 268 */ 271 - if (!plane_state->format || 272 - !rcar_du_plane_needs_realloc(plane, plane_state)) 269 + if (!new_plane_state->format || 270 + !rcar_du_plane_needs_realloc(old_plane_state, new_plane_state)) 273 271 continue; 274 272 275 - /* Try to allocate the plane from the free planes currently 273 + /* 274 + * Try to allocate the plane from the free planes currently 276 275 * associated with the target CRTC to avoid restarting the CRTC 277 276 * group and thus minimize flicker. If it fails fall back to 278 277 * allocating from all free planes. 279 278 */ 280 - crtc_planes = to_rcar_crtc(plane_state->state.crtc)->index % 2 279 + crtc_planes = to_rcar_crtc(new_plane_state->state.crtc)->index % 2 281 280 ? plane->group->dptsr_planes 282 281 : ~plane->group->dptsr_planes; 283 282 free = group_free_planes[plane->group->index]; 284 283 285 - idx = rcar_du_plane_hwalloc(plane, plane_state, 284 + idx = rcar_du_plane_hwalloc(plane, new_plane_state, 286 285 free & crtc_planes); 287 286 if (idx < 0) 288 - idx = rcar_du_plane_hwalloc(plane, plane_state, 287 + idx = rcar_du_plane_hwalloc(plane, new_plane_state, 289 288 free); 290 289 if (idx < 0) { 291 290 dev_dbg(rcdu->dev, "%s: no available hardware plane\n", ··· 299 288 } 300 289 301 290 dev_dbg(rcdu->dev, "%s: allocated %u hwplanes (index %u)\n", 302 - __func__, plane_state->format->planes, idx); 291 + __func__, new_plane_state->format->planes, idx); 303 292 304 - plane_state->hwindex = idx; 293 + new_plane_state->hwindex = idx; 305 294 306 295 group_free_planes[plane->group->index] &= 307 - ~rcar_du_plane_hwmask(plane_state); 296 + ~rcar_du_plane_hwmask(new_plane_state); 308 297 309 298 dev_dbg(rcdu->dev, "%s: group %u free planes mask 0x%02x\n", 310 299 __func__, plane->group->index, ··· 362 351 dma[1] = 0; 363 352 } 364 353 365 - /* Memory pitch (expressed in pixels). Must be doubled for interlaced 354 + /* 355 + * Memory pitch (expressed in pixels). 
Must be doubled for interlaced 366 356 * operation with 32bpp formats. 367 357 */ 368 358 rcar_du_plane_write(rgrp, index, PnMWR, 369 359 (interlaced && state->format->bpp == 32) ? 370 360 pitch * 2 : pitch); 371 361 372 - /* The Y position is expressed in raster line units and must be doubled 362 + /* 363 + * The Y position is expressed in raster line units and must be doubled 373 364 * for 32bpp formats, according to the R8A7790 datasheet. No mention of 374 365 * doubling the Y position is found in the R8A7779 datasheet, but the 375 366 * rule seems to apply there as well. ··· 409 396 u32 colorkey; 410 397 u32 pnmr; 411 398 412 - /* The PnALPHAR register controls alpha-blending in 16bpp formats 399 + /* 400 + * The PnALPHAR register controls alpha-blending in 16bpp formats 413 401 * (ARGB1555 and XRGB1555). 414 402 * 415 403 * For ARGB, set the alpha value to 0, and enable alpha-blending when ··· 427 413 428 414 pnmr = PnMR_BM_MD | state->format->pnmr; 429 415 430 - /* Disable color keying when requested. YUV formats have the 416 + /* 417 + * Disable color keying when requested. YUV formats have the 431 418 * PnMR_SPIM_TP_OFF bit set in their pnmr field, disabling color keying 432 419 * automatically. 433 420 */ ··· 472 457 u32 ddcr2 = PnDDCR2_CODE; 473 458 u32 ddcr4; 474 459 475 - /* Data format 460 + /* 461 + * Data format 476 462 * 477 463 * The data format is selected by the DDDF field in PnMR and the EDF 478 464 * field in DDCR4. ··· 605 589 606 590 rcar_du_plane_setup(rplane); 607 591 608 - /* Check whether the source has changed from memory to live source or 592 + /* 593 + * Check whether the source has changed from memory to live source or 609 594 * from live source to memory. The source has been configured by the 610 595 * VSPS bit in the PnDDCR4 register. 
Although the datasheet states that 611 596 * the bit is updated during vertical blanking, it seems that updates ··· 743 726 unsigned int i; 744 727 int ret; 745 728 746 - /* Create one primary plane per CRTC in this group and seven overlay 729 + /* 730 + * Create one primary plane per CRTC in this group and seven overlay 747 731 * planes. 748 732 */ 749 733 rgrp->num_planes = rgrp->num_crtcs + 7;
+2 -1
drivers/gpu/drm/rcar-du/rcar_du_plane.h
··· 20 20 struct rcar_du_format_info; 21 21 struct rcar_du_group; 22 22 23 - /* The RCAR DU has 8 hardware planes, shared between primary and overlay planes. 23 + /* 24 + * The RCAR DU has 8 hardware planes, shared between primary and overlay planes. 24 25 * As using overlay planes requires at least one of the CRTCs being enabled, no 25 26 * more than 7 overlay planes can be available. We thus create 1 primary plane 26 27 * per CRTC and 7 overlay planes, for a total of up to 9 KMS planes.
+28 -23
drivers/gpu/drm/rcar-du/rcar_du_vsp.c
··· 19 19 #include <drm/drm_gem_cma_helper.h> 20 20 #include <drm/drm_plane_helper.h> 21 21 22 + #include <linux/bitops.h> 22 23 #include <linux/dma-mapping.h> 23 24 #include <linux/of_platform.h> 24 25 #include <linux/scatterlist.h> ··· 31 30 #include "rcar_du_kms.h" 32 31 #include "rcar_du_vsp.h" 33 32 34 - static void rcar_du_vsp_complete(void *private) 33 + static void rcar_du_vsp_complete(void *private, bool completed) 35 34 { 36 35 struct rcar_du_crtc *crtc = private; 37 36 38 - rcar_du_crtc_finish_page_flip(crtc); 37 + if (crtc->vblank_enable) 38 + drm_crtc_handle_vblank(&crtc->crtc); 39 + 40 + if (completed) 41 + rcar_du_crtc_finish_page_flip(crtc); 39 42 } 40 43 41 44 void rcar_du_vsp_enable(struct rcar_du_crtc *crtc) ··· 78 73 79 74 __rcar_du_plane_setup(crtc->group, &state); 80 75 81 - /* Ensure that the plane source configuration takes effect by requesting 76 + /* 77 + * Ensure that the plane source configuration takes effect by requesting 82 78 * a restart of the group. See rcar_du_plane_atomic_update() for a more 83 79 * detailed explanation. 84 80 * ··· 87 81 */ 88 82 crtc->group->need_restart = true; 89 83 90 - vsp1_du_setup_lif(crtc->vsp->vsp, &cfg); 84 + vsp1_du_setup_lif(crtc->vsp->vsp, crtc->vsp_pipe, &cfg); 91 85 } 92 86 93 87 void rcar_du_vsp_disable(struct rcar_du_crtc *crtc) 94 88 { 95 - vsp1_du_setup_lif(crtc->vsp->vsp, NULL); 89 + vsp1_du_setup_lif(crtc->vsp->vsp, crtc->vsp_pipe, NULL); 96 90 } 97 91 98 92 void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc) 99 93 { 100 - vsp1_du_atomic_begin(crtc->vsp->vsp); 94 + vsp1_du_atomic_begin(crtc->vsp->vsp, crtc->vsp_pipe); 101 95 } 102 96 103 97 void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc) 104 98 { 105 - vsp1_du_atomic_flush(crtc->vsp->vsp); 99 + vsp1_du_atomic_flush(crtc->vsp->vsp, crtc->vsp_pipe); 106 100 } 107 101 108 102 /* Keep the two tables in sync. 
*/ ··· 168 162 { 169 163 struct rcar_du_vsp_plane_state *state = 170 164 to_rcar_vsp_plane_state(plane->plane.state); 165 + struct rcar_du_crtc *crtc = to_rcar_crtc(state->state.crtc); 171 166 struct drm_framebuffer *fb = plane->plane.state->fb; 172 167 struct vsp1_du_atomic_config cfg = { 173 168 .pixelformat = 0, ··· 199 192 } 200 193 } 201 194 202 - vsp1_du_atomic_update(plane->vsp->vsp, plane->index, &cfg); 195 + vsp1_du_atomic_update(plane->vsp->vsp, crtc->vsp_pipe, 196 + plane->index, &cfg); 203 197 } 204 198 205 199 static int rcar_du_vsp_plane_prepare_fb(struct drm_plane *plane, ··· 296 288 struct drm_plane_state *old_state) 297 289 { 298 290 struct rcar_du_vsp_plane *rplane = to_rcar_vsp_plane(plane); 291 + struct rcar_du_crtc *crtc = to_rcar_crtc(old_state->crtc); 299 292 300 293 if (plane->state->crtc) 301 294 rcar_du_vsp_plane_setup(rplane); 302 295 else 303 - vsp1_du_atomic_update(rplane->vsp->vsp, rplane->index, NULL); 296 + vsp1_du_atomic_update(rplane->vsp->vsp, crtc->vsp_pipe, 297 + rplane->index, NULL); 304 298 } 305 299 306 300 static const struct drm_plane_helper_funcs rcar_du_vsp_plane_helper_funcs = { ··· 401 391 .atomic_get_property = rcar_du_vsp_plane_atomic_get_property, 402 392 }; 403 393 404 - int rcar_du_vsp_init(struct rcar_du_vsp *vsp) 394 + int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np, 395 + unsigned int crtcs) 405 396 { 406 397 struct rcar_du_device *rcdu = vsp->dev; 407 398 struct platform_device *pdev; 408 - struct device_node *np; 399 + unsigned int num_crtcs = hweight32(crtcs); 409 400 unsigned int i; 410 401 int ret; 411 402 412 403 /* Find the VSP device and initialize it. 
*/ 413 - np = of_parse_phandle(rcdu->dev->of_node, "vsps", vsp->index); 414 - if (!np) { 415 - dev_err(rcdu->dev, "vsps node not found\n"); 416 - return -ENXIO; 417 - } 418 - 419 404 pdev = of_find_device_by_node(np); 420 - of_node_put(np); 421 405 if (!pdev) 422 406 return -ENXIO; 423 407 ··· 421 417 if (ret < 0) 422 418 return ret; 423 419 424 - /* The VSP2D (Gen3) has 5 RPFs, but the VSP1D (Gen2) is limited to 420 + /* 421 + * The VSP2D (Gen3) has 5 RPFs, but the VSP1D (Gen2) is limited to 425 422 * 4 RPFs. 426 423 */ 427 424 vsp->num_planes = rcdu->info->gen >= 3 ? 5 : 4; ··· 433 428 return -ENOMEM; 434 429 435 430 for (i = 0; i < vsp->num_planes; ++i) { 436 - enum drm_plane_type type = i ? DRM_PLANE_TYPE_OVERLAY 437 - : DRM_PLANE_TYPE_PRIMARY; 431 + enum drm_plane_type type = i < num_crtcs 432 + ? DRM_PLANE_TYPE_PRIMARY 433 + : DRM_PLANE_TYPE_OVERLAY; 438 434 struct rcar_du_vsp_plane *plane = &vsp->planes[i]; 439 435 440 436 plane->vsp = vsp; 441 437 plane->index = i; 442 438 443 - ret = drm_universal_plane_init(rcdu->ddev, &plane->plane, 444 - 1 << vsp->index, 439 + ret = drm_universal_plane_init(rcdu->ddev, &plane->plane, crtcs, 445 440 &rcar_du_vsp_plane_funcs, 446 441 formats_kms, 447 442 ARRAY_SIZE(formats_kms), type,
+8 -2
drivers/gpu/drm/rcar-du/rcar_du_vsp.h
··· 64 64 } 65 65 66 66 #ifdef CONFIG_DRM_RCAR_VSP 67 - int rcar_du_vsp_init(struct rcar_du_vsp *vsp); 67 + int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np, 68 + unsigned int crtcs); 68 69 void rcar_du_vsp_enable(struct rcar_du_crtc *crtc); 69 70 void rcar_du_vsp_disable(struct rcar_du_crtc *crtc); 70 71 void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc); 71 72 void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc); 72 73 #else 73 - static inline int rcar_du_vsp_init(struct rcar_du_vsp *vsp) { return -ENXIO; }; 74 + static inline int rcar_du_vsp_init(struct rcar_du_vsp *vsp, 75 + struct device_node *np, 76 + unsigned int crtcs) 77 + { 78 + return -ENXIO; 79 + } 74 80 static inline void rcar_du_vsp_enable(struct rcar_du_crtc *crtc) { }; 75 81 static inline void rcar_du_vsp_disable(struct rcar_du_crtc *crtc) { }; 76 82 static inline void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc) { };
+1 -1
drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c
··· 45 45 { 46 46 const struct rcar_hdmi_phy_params *params = rcar_hdmi_phy_params; 47 47 48 - for (; params && params->mpixelclock != ~0UL; ++params) { 48 + for (; params->mpixelclock != ~0UL; ++params) { 49 49 if (mpixelclock <= params->mpixelclock) 50 50 break; 51 51 }
+5 -2
drivers/media/platform/vsp1/vsp1.h
··· 41 41 struct vsp1_sru; 42 42 struct vsp1_uds; 43 43 44 + #define VSP1_MAX_LIF 2 44 45 #define VSP1_MAX_RPF 5 45 46 #define VSP1_MAX_UDS 3 46 47 #define VSP1_MAX_WPF 4 47 48 48 - #define VSP1_HAS_LIF (1 << 0) 49 49 #define VSP1_HAS_LUT (1 << 1) 50 50 #define VSP1_HAS_SRU (1 << 2) 51 51 #define VSP1_HAS_BRU (1 << 3) ··· 54 54 #define VSP1_HAS_WPF_HFLIP (1 << 6) 55 55 #define VSP1_HAS_HGO (1 << 7) 56 56 #define VSP1_HAS_HGT (1 << 8) 57 + #define VSP1_HAS_BRS (1 << 9) 57 58 58 59 struct vsp1_device_info { 59 60 u32 version; 60 61 const char *model; 61 62 unsigned int gen; 62 63 unsigned int features; 64 + unsigned int lif_count; 63 65 unsigned int rpf_count; 64 66 unsigned int uds_count; 65 67 unsigned int wpf_count; ··· 78 76 struct rcar_fcp_device *fcp; 79 77 struct device *bus_master; 80 78 79 + struct vsp1_bru *brs; 81 80 struct vsp1_bru *bru; 82 81 struct vsp1_clu *clu; 83 82 struct vsp1_hgo *hgo; 84 83 struct vsp1_hgt *hgt; 85 84 struct vsp1_hsit *hsi; 86 85 struct vsp1_hsit *hst; 87 - struct vsp1_lif *lif; 86 + struct vsp1_lif *lif[VSP1_MAX_LIF]; 88 87 struct vsp1_lut *lut; 89 88 struct vsp1_rwpf *rpf[VSP1_MAX_RPF]; 90 89 struct vsp1_sru *sru;
+30 -15
drivers/media/platform/vsp1/vsp1_bru.c
··· 33 33 static inline void vsp1_bru_write(struct vsp1_bru *bru, struct vsp1_dl_list *dl, 34 34 u32 reg, u32 data) 35 35 { 36 - vsp1_dl_list_write(dl, reg, data); 36 + vsp1_dl_list_write(dl, bru->base + reg, data); 37 37 } 38 38 39 39 /* ----------------------------------------------------------------------------- ··· 332 332 /* 333 333 * Route BRU input 1 as SRC input to the ROP unit and configure the ROP 334 334 * unit with a NOP operation to make BRU input 1 available as the 335 - * Blend/ROP unit B SRC input. 335 + * Blend/ROP unit B SRC input. Only needed for BRU, the BRS has no ROP 336 + * unit. 336 337 */ 337 - vsp1_bru_write(bru, dl, VI6_BRU_ROP, VI6_BRU_ROP_DSTSEL_BRUIN(1) | 338 - VI6_BRU_ROP_CROP(VI6_ROP_NOP) | 339 - VI6_BRU_ROP_AROP(VI6_ROP_NOP)); 338 + if (entity->type == VSP1_ENTITY_BRU) 339 + vsp1_bru_write(bru, dl, VI6_BRU_ROP, 340 + VI6_BRU_ROP_DSTSEL_BRUIN(1) | 341 + VI6_BRU_ROP_CROP(VI6_ROP_NOP) | 342 + VI6_BRU_ROP_AROP(VI6_ROP_NOP)); 340 343 341 344 for (i = 0; i < bru->entity.source_pad; ++i) { 342 345 bool premultiplied = false; ··· 369 366 ctrl |= VI6_BRU_CTRL_DSTSEL_VRPF; 370 367 371 368 /* 372 - * Route BRU inputs 0 to 3 as SRC inputs to Blend/ROP units A to 373 - * D in that order. The Blend/ROP unit B SRC is hardwired to the 374 - * ROP unit output, the corresponding register bits must be set 375 - * to 0. 369 + * Route inputs 0 to 3 as SRC inputs to Blend/ROP units A to D 370 + * in that order. In the BRU the Blend/ROP unit B SRC is 371 + * hardwired to the ROP unit output, the corresponding register 372 + * bits must be set to 0. The BRS has no ROP unit and doesn't 373 + * need any special processing. 
376 374 */ 377 - if (i != 1) 375 + if (!(entity->type == VSP1_ENTITY_BRU && i == 1)) 378 376 ctrl |= VI6_BRU_CTRL_SRCSEL_BRUIN(i); 379 377 380 378 vsp1_bru_write(bru, dl, VI6_BRU_CTRL(i), ctrl); ··· 411 407 * Initialization and Cleanup 412 408 */ 413 409 414 - struct vsp1_bru *vsp1_bru_create(struct vsp1_device *vsp1) 410 + struct vsp1_bru *vsp1_bru_create(struct vsp1_device *vsp1, 411 + enum vsp1_entity_type type) 415 412 { 416 413 struct vsp1_bru *bru; 414 + unsigned int num_pads; 415 + const char *name; 417 416 int ret; 418 417 419 418 bru = devm_kzalloc(vsp1->dev, sizeof(*bru), GFP_KERNEL); 420 419 if (bru == NULL) 421 420 return ERR_PTR(-ENOMEM); 422 421 422 + bru->base = type == VSP1_ENTITY_BRU ? VI6_BRU_BASE : VI6_BRS_BASE; 423 423 bru->entity.ops = &bru_entity_ops; 424 - bru->entity.type = VSP1_ENTITY_BRU; 424 + bru->entity.type = type; 425 425 426 - ret = vsp1_entity_init(vsp1, &bru->entity, "bru", 427 - vsp1->info->num_bru_inputs + 1, &bru_ops, 426 + if (type == VSP1_ENTITY_BRU) { 427 + num_pads = vsp1->info->num_bru_inputs + 1; 428 + name = "bru"; 429 + } else { 430 + num_pads = 3; 431 + name = "brs"; 432 + } 433 + 434 + ret = vsp1_entity_init(vsp1, &bru->entity, name, num_pads, &bru_ops, 428 435 MEDIA_ENT_F_PROC_VIDEO_COMPOSER); 429 436 if (ret < 0) 430 437 return ERR_PTR(ret); ··· 450 435 bru->entity.subdev.ctrl_handler = &bru->ctrls; 451 436 452 437 if (bru->ctrls.error) { 453 - dev_err(vsp1->dev, "bru: failed to initialize controls\n"); 438 + dev_err(vsp1->dev, "%s: failed to initialize controls\n", name); 454 439 ret = bru->ctrls.error; 455 440 vsp1_entity_destroy(&bru->entity); 456 441 return ERR_PTR(ret);
+3 -1
drivers/media/platform/vsp1/vsp1_bru.h
··· 26 26 27 27 struct vsp1_bru { 28 28 struct vsp1_entity entity; 29 + unsigned int base; 29 30 30 31 struct v4l2_ctrl_handler ctrls; 31 32 ··· 42 41 return container_of(subdev, struct vsp1_bru, entity.subdev); 43 42 } 44 43 45 - struct vsp1_bru *vsp1_bru_create(struct vsp1_device *vsp1); 44 + struct vsp1_bru *vsp1_bru_create(struct vsp1_device *vsp1, 45 + enum vsp1_entity_type type); 46 46 47 47 #endif /* __VSP1_BRU_H__ */
+132 -89
drivers/media/platform/vsp1/vsp1_dl.c
··· 95 95 * struct vsp1_dl_manager - Display List manager 96 96 * @index: index of the related WPF 97 97 * @mode: display list operation mode (header or headerless) 98 + * @singleshot: execute the display list in single-shot mode 98 99 * @vsp1: the VSP1 device 99 100 * @lock: protects the free, active, queued, pending and gc_fragments lists 100 101 * @free: array of all free display lists ··· 108 107 struct vsp1_dl_manager { 109 108 unsigned int index; 110 109 enum vsp1_dl_mode mode; 110 + bool singleshot; 111 111 struct vsp1_device *vsp1; 112 112 113 113 spinlock_t lock; ··· 439 437 440 438 static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last) 441 439 { 440 + struct vsp1_dl_manager *dlm = dl->dlm; 442 441 struct vsp1_dl_header_list *hdr = dl->header->lists; 443 442 struct vsp1_dl_body *dlb; 444 443 unsigned int num_lists = 0; ··· 464 461 465 462 dl->header->num_lists = num_lists; 466 463 467 - /* 468 - * If this display list's chain is not empty, we are on a list, where 469 - * the next item in the list is the display list entity which should be 470 - * automatically queued by the hardware. 471 - */ 472 464 if (!list_empty(&dl->chain) && !is_last) { 465 + /* 466 + * If this display list's chain is not empty, we are on a list, 467 + * and the next item is the display list that we must queue for 468 + * automatic processing by the hardware. 469 + */ 473 470 struct vsp1_dl_list *next = list_next_entry(dl, chain); 474 471 475 472 dl->header->next_header = next->dma; 476 473 dl->header->flags = VSP1_DLH_AUTO_START; 474 + } else if (!dlm->singleshot) { 475 + /* 476 + * if the display list manager works in continuous mode, the VSP 477 + * should loop over the display list continuously until 478 + * instructed to do otherwise. 
479 + */ 480 + dl->header->next_header = dl->dma; 481 + dl->header->flags = VSP1_DLH_INT_ENABLE | VSP1_DLH_AUTO_START; 477 482 } else { 483 + /* 484 + * Otherwise, in mem-to-mem mode, we work in single-shot mode 485 + * and the next display list must not be started automatically. 486 + */ 478 487 dl->header->flags = VSP1_DLH_INT_ENABLE; 479 488 } 489 + } 490 + 491 + static bool vsp1_dl_list_hw_update_pending(struct vsp1_dl_manager *dlm) 492 + { 493 + struct vsp1_device *vsp1 = dlm->vsp1; 494 + 495 + if (!dlm->queued) 496 + return false; 497 + 498 + /* 499 + * Check whether the VSP1 has taken the update. In headerless mode the 500 + * hardware indicates this by clearing the UPD bit in the DL_BODY_SIZE 501 + * register, and in header mode by clearing the UPDHDR bit in the CMD 502 + * register. 503 + */ 504 + if (dlm->mode == VSP1_DL_MODE_HEADERLESS) 505 + return !!(vsp1_read(vsp1, VI6_DL_BODY_SIZE) 506 + & VI6_DL_BODY_SIZE_UPD); 507 + else 508 + return !!(vsp1_read(vsp1, VI6_CMD(dlm->index) & VI6_CMD_UPDHDR)); 509 + } 510 + 511 + static void vsp1_dl_list_hw_enqueue(struct vsp1_dl_list *dl) 512 + { 513 + struct vsp1_dl_manager *dlm = dl->dlm; 514 + struct vsp1_device *vsp1 = dlm->vsp1; 515 + 516 + if (dlm->mode == VSP1_DL_MODE_HEADERLESS) { 517 + /* 518 + * In headerless mode, program the hardware directly with the 519 + * display list body address and size and set the UPD bit. The 520 + * bit will be cleared by the hardware when the display list 521 + * processing starts. 522 + */ 523 + vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0.dma); 524 + vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD | 525 + (dl->body0.num_entries * sizeof(*dl->header->lists))); 526 + } else { 527 + /* 528 + * In header mode, program the display list header address. If 529 + * the hardware is idle (single-shot mode or first frame in 530 + * continuous mode) it will then be started independently. 
If 531 + * the hardware is operating, the VI6_DL_HDR_REF_ADDR register 532 + * will be updated with the display list address. 533 + */ 534 + vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma); 535 + } 536 + } 537 + 538 + static void vsp1_dl_list_commit_continuous(struct vsp1_dl_list *dl) 539 + { 540 + struct vsp1_dl_manager *dlm = dl->dlm; 541 + 542 + /* 543 + * If a previous display list has been queued to the hardware but not 544 + * processed yet, the VSP can start processing it at any time. In that 545 + * case we can't replace the queued list by the new one, as we could 546 + * race with the hardware. We thus mark the update as pending, it will 547 + * be queued up to the hardware by the frame end interrupt handler. 548 + */ 549 + if (vsp1_dl_list_hw_update_pending(dlm)) { 550 + __vsp1_dl_list_put(dlm->pending); 551 + dlm->pending = dl; 552 + return; 553 + } 554 + 555 + /* 556 + * Pass the new display list to the hardware and mark it as queued. It 557 + * will become active when the hardware starts processing it. 558 + */ 559 + vsp1_dl_list_hw_enqueue(dl); 560 + 561 + __vsp1_dl_list_put(dlm->queued); 562 + dlm->queued = dl; 563 + } 564 + 565 + static void vsp1_dl_list_commit_singleshot(struct vsp1_dl_list *dl) 566 + { 567 + struct vsp1_dl_manager *dlm = dl->dlm; 568 + 569 + /* 570 + * When working in single-shot mode, the caller guarantees that the 571 + * hardware is idle at this point. Just commit the head display list 572 + * to hardware. Chained lists will be started automatically. 
573 + */ 574 + vsp1_dl_list_hw_enqueue(dl); 575 + 576 + dlm->active = dl; 480 577 } 481 578 482 579 void vsp1_dl_list_commit(struct vsp1_dl_list *dl) 483 580 { 484 581 struct vsp1_dl_manager *dlm = dl->dlm; 485 - struct vsp1_device *vsp1 = dlm->vsp1; 582 + struct vsp1_dl_list *dl_child; 486 583 unsigned long flags; 487 - bool update; 488 584 489 - spin_lock_irqsave(&dlm->lock, flags); 490 - 491 - if (dl->dlm->mode == VSP1_DL_MODE_HEADER) { 492 - struct vsp1_dl_list *dl_child; 493 - 494 - /* 495 - * In header mode the caller guarantees that the hardware is 496 - * idle at this point. 497 - */ 498 - 585 + if (dlm->mode == VSP1_DL_MODE_HEADER) { 499 586 /* Fill the header for the head and chained display lists. */ 500 587 vsp1_dl_list_fill_header(dl, list_empty(&dl->chain)); 501 588 ··· 594 501 595 502 vsp1_dl_list_fill_header(dl_child, last); 596 503 } 597 - 598 - /* 599 - * Commit the head display list to hardware. Chained headers 600 - * will auto-start. 601 - */ 602 - vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma); 603 - 604 - dlm->active = dl; 605 - goto done; 606 504 } 607 505 608 - /* 609 - * Once the UPD bit has been set the hardware can start processing the 610 - * display list at any time and we can't touch the address and size 611 - * registers. In that case mark the update as pending, it will be 612 - * queued up to the hardware by the frame end interrupt handler. 613 - */ 614 - update = !!(vsp1_read(vsp1, VI6_DL_BODY_SIZE) & VI6_DL_BODY_SIZE_UPD); 615 - if (update) { 616 - __vsp1_dl_list_put(dlm->pending); 617 - dlm->pending = dl; 618 - goto done; 619 - } 506 + spin_lock_irqsave(&dlm->lock, flags); 620 507 621 - /* 622 - * Program the hardware with the display list body address and size. 623 - * The UPD bit will be cleared by the device when the display list is 624 - * processed. 
625 - */ 626 - vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0.dma); 627 - vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD | 628 - (dl->body0.num_entries * sizeof(*dl->header->lists))); 508 + if (dlm->singleshot) 509 + vsp1_dl_list_commit_singleshot(dl); 510 + else 511 + vsp1_dl_list_commit_continuous(dl); 629 512 630 - __vsp1_dl_list_put(dlm->queued); 631 - dlm->queued = dl; 632 - 633 - done: 634 513 spin_unlock_irqrestore(&dlm->lock, flags); 635 514 } 636 515 637 516 /* ----------------------------------------------------------------------------- 638 517 * Display List Manager 639 518 */ 640 - 641 - /* Interrupt Handling */ 642 - void vsp1_dlm_irq_display_start(struct vsp1_dl_manager *dlm) 643 - { 644 - spin_lock(&dlm->lock); 645 - 646 - /* 647 - * The display start interrupt signals the end of the display list 648 - * processing by the device. The active display list, if any, won't be 649 - * accessed anymore and can be reused. 650 - */ 651 - __vsp1_dl_list_put(dlm->active); 652 - dlm->active = NULL; 653 - 654 - spin_unlock(&dlm->lock); 655 - } 656 519 657 520 /** 658 521 * vsp1_dlm_irq_frame_end - Display list handler for the frame end interrupt ··· 621 572 */ 622 573 bool vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm) 623 574 { 624 - struct vsp1_device *vsp1 = dlm->vsp1; 625 575 bool completed = false; 626 576 627 577 spin_lock(&dlm->lock); 628 578 629 - __vsp1_dl_list_put(dlm->active); 630 - dlm->active = NULL; 631 - 632 579 /* 633 - * Header mode is used for mem-to-mem pipelines only. We don't need to 634 - * perform any operation as there can't be any new display list queued 635 - * in that case. 580 + * The mem-to-mem pipelines work in single-shot mode. No new display 581 + * list can be queued, we don't have to do anything. 
636 582 */ 637 - if (dlm->mode == VSP1_DL_MODE_HEADER) { 583 + if (dlm->singleshot) { 584 + __vsp1_dl_list_put(dlm->active); 585 + dlm->active = NULL; 638 586 completed = true; 639 587 goto done; 640 588 } 641 589 642 590 /* 643 - * The UPD bit set indicates that the commit operation raced with the 644 - * interrupt and occurred after the frame end event and UPD clear but 645 - * before interrupt processing. The hardware hasn't taken the update 646 - * into account yet, we'll thus skip one frame and retry. 591 + * If the commit operation raced with the interrupt and occurred after 592 + * the frame end event but before interrupt processing, the hardware 593 + * hasn't taken the update into account yet. We have to skip one frame 594 + * and retry. 647 595 */ 648 - if (vsp1_read(vsp1, VI6_DL_BODY_SIZE) & VI6_DL_BODY_SIZE_UPD) 596 + if (vsp1_dl_list_hw_update_pending(dlm)) 649 597 goto done; 650 598 651 599 /* ··· 650 604 * frame end interrupt. The display list thus becomes active. 651 605 */ 652 606 if (dlm->queued) { 607 + __vsp1_dl_list_put(dlm->active); 653 608 dlm->active = dlm->queued; 654 609 dlm->queued = NULL; 655 610 completed = true; 656 611 } 657 612 658 613 /* 659 - * Now that the UPD bit has been cleared we can queue the next display 660 - * list to the hardware if one has been prepared. 614 + * Now that the VSP has started processing the queued display list, we 615 + * can queue the pending display list to the hardware if one has been 616 + * prepared. 
661 617 */ 662 618 if (dlm->pending) { 663 - struct vsp1_dl_list *dl = dlm->pending; 664 - 665 - vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0.dma); 666 - vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD | 667 - (dl->body0.num_entries * 668 - sizeof(*dl->header->lists))); 669 - 670 - dlm->queued = dl; 619 + vsp1_dl_list_hw_enqueue(dlm->pending); 620 + dlm->queued = dlm->pending; 671 621 dlm->pending = NULL; 672 622 } 673 623 ··· 756 714 dlm->index = index; 757 715 dlm->mode = index == 0 && !vsp1->info->uapi 758 716 ? VSP1_DL_MODE_HEADERLESS : VSP1_DL_MODE_HEADER; 717 + dlm->singleshot = vsp1->info->uapi; 759 718 dlm->vsp1 = vsp1; 760 719 761 720 spin_lock_init(&dlm->lock);
-1
drivers/media/platform/vsp1/vsp1_dl.h
··· 27 27 unsigned int prealloc); 28 28 void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm); 29 29 void vsp1_dlm_reset(struct vsp1_dl_manager *dlm); 30 - void vsp1_dlm_irq_display_start(struct vsp1_dl_manager *dlm); 31 30 bool vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm); 32 31 33 32 struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm);
+146 -140
drivers/media/platform/vsp1/vsp1_drm.c
··· 32 32 * Interrupt Handling 33 33 */ 34 34 35 - void vsp1_drm_display_start(struct vsp1_device *vsp1) 35 + static void vsp1_du_pipeline_frame_end(struct vsp1_pipeline *pipe, 36 + bool completed) 36 37 { 37 - vsp1_dlm_irq_display_start(vsp1->drm->pipe.output->dlm); 38 - } 38 + struct vsp1_drm_pipeline *drm_pipe = to_vsp1_drm_pipeline(pipe); 39 39 40 - static void vsp1_du_pipeline_frame_end(struct vsp1_pipeline *pipe) 41 - { 42 - struct vsp1_drm *drm = to_vsp1_drm(pipe); 43 - 44 - if (drm->du_complete) 45 - drm->du_complete(drm->du_private); 40 + if (drm_pipe->du_complete) 41 + drm_pipe->du_complete(drm_pipe->du_private, completed); 46 42 } 47 43 48 44 /* ----------------------------------------------------------------------------- ··· 59 63 /** 60 64 * vsp1_du_setup_lif - Setup the output part of the VSP pipeline 61 65 * @dev: the VSP device 66 + * @pipe_index: the DRM pipeline index 62 67 * @cfg: the LIF configuration 63 68 * 64 69 * Configure the output part of VSP DRM pipeline for the given frame @cfg.width 65 - * and @cfg.height. This sets up formats on the BRU source pad, the WPF0 sink 66 - * and source pads, and the LIF sink pad. 70 + * and @cfg.height. This sets up formats on the blend unit (BRU or BRS) source 71 + * pad, the WPF sink and source pads, and the LIF sink pad. 67 72 * 68 - * As the media bus code on the BRU source pad is conditioned by the 69 - * configuration of the BRU sink 0 pad, we also set up the formats on all BRU 73 + * The @pipe_index argument selects which DRM pipeline to setup. The number of 74 + * available pipelines depend on the VSP instance. 75 + * 76 + * As the media bus code on the blend unit source pad is conditioned by the 77 + * configuration of its sink 0 pad, we also set up the formats on all blend unit 70 78 * sinks, even if the configuration will be overwritten later by 71 - * vsp1_du_setup_rpf(). This ensures that the BRU configuration is set to a well 72 - * defined state. 79 + * vsp1_du_setup_rpf(). 
This ensures that the blend unit configuration is set to 80 + * a well defined state. 73 81 * 74 82 * Return 0 on success or a negative error code on failure. 75 83 */ 76 - int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg) 84 + int vsp1_du_setup_lif(struct device *dev, unsigned int pipe_index, 85 + const struct vsp1_du_lif_config *cfg) 77 86 { 78 87 struct vsp1_device *vsp1 = dev_get_drvdata(dev); 79 - struct vsp1_pipeline *pipe = &vsp1->drm->pipe; 80 - struct vsp1_bru *bru = vsp1->bru; 88 + struct vsp1_drm_pipeline *drm_pipe; 89 + struct vsp1_pipeline *pipe; 90 + struct vsp1_bru *bru; 81 91 struct v4l2_subdev_format format; 92 + const char *bru_name; 82 93 unsigned int i; 83 94 int ret; 95 + 96 + if (pipe_index >= vsp1->info->lif_count) 97 + return -EINVAL; 98 + 99 + drm_pipe = &vsp1->drm->pipe[pipe_index]; 100 + pipe = &drm_pipe->pipe; 101 + bru = to_bru(&pipe->bru->subdev); 102 + bru_name = pipe->bru->type == VSP1_ENTITY_BRU ? "BRU" : "BRS"; 84 103 85 104 if (!cfg) { 86 105 /* ··· 108 97 109 98 media_pipeline_stop(&pipe->output->entity.subdev.entity); 110 99 111 - for (i = 0; i < bru->entity.source_pad; ++i) { 112 - vsp1->drm->inputs[i].enabled = false; 113 - bru->inputs[i].rpf = NULL; 100 + for (i = 0; i < ARRAY_SIZE(pipe->inputs); ++i) { 101 + struct vsp1_rwpf *rpf = pipe->inputs[i]; 102 + 103 + if (!rpf) 104 + continue; 105 + 106 + /* 107 + * Remove the RPF from the pipe and the list of BRU 108 + * inputs. 
109 + */ 110 + WARN_ON(list_empty(&rpf->entity.list_pipe)); 111 + list_del_init(&rpf->entity.list_pipe); 114 112 pipe->inputs[i] = NULL; 113 + 114 + bru->inputs[rpf->bru_input].rpf = NULL; 115 115 } 116 116 117 + drm_pipe->du_complete = NULL; 117 118 pipe->num_inputs = 0; 118 - vsp1->drm->du_complete = NULL; 119 119 120 120 vsp1_dlm_reset(pipe->output->dlm); 121 121 vsp1_device_put(vsp1); ··· 136 114 return 0; 137 115 } 138 116 139 - dev_dbg(vsp1->dev, "%s: configuring LIF with format %ux%u\n", 140 - __func__, cfg->width, cfg->height); 117 + dev_dbg(vsp1->dev, "%s: configuring LIF%u with format %ux%u\n", 118 + __func__, pipe_index, cfg->width, cfg->height); 141 119 142 120 /* 143 121 * Configure the format at the BRU sinks and propagate it through the ··· 146 124 memset(&format, 0, sizeof(format)); 147 125 format.which = V4L2_SUBDEV_FORMAT_ACTIVE; 148 126 149 - for (i = 0; i < bru->entity.source_pad; ++i) { 127 + for (i = 0; i < pipe->bru->source_pad; ++i) { 150 128 format.pad = i; 151 129 152 130 format.format.width = cfg->width; ··· 154 132 format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32; 155 133 format.format.field = V4L2_FIELD_NONE; 156 134 157 - ret = v4l2_subdev_call(&bru->entity.subdev, pad, 135 + ret = v4l2_subdev_call(&pipe->bru->subdev, pad, 158 136 set_fmt, NULL, &format); 159 137 if (ret < 0) 160 138 return ret; 161 139 162 - dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on BRU pad %u\n", 140 + dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on %s pad %u\n", 163 141 __func__, format.format.width, format.format.height, 164 - format.format.code, i); 142 + format.format.code, bru_name, i); 165 143 } 166 144 167 - format.pad = bru->entity.source_pad; 145 + format.pad = pipe->bru->source_pad; 168 146 format.format.width = cfg->width; 169 147 format.format.height = cfg->height; 170 148 format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32; 171 149 format.format.field = V4L2_FIELD_NONE; 172 150 173 - ret = v4l2_subdev_call(&bru->entity.subdev, pad, set_fmt, NULL, 
151 + ret = v4l2_subdev_call(&pipe->bru->subdev, pad, set_fmt, NULL, 174 152 &format); 175 153 if (ret < 0) 176 154 return ret; 177 155 178 - dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on BRU pad %u\n", 156 + dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on %s pad %u\n", 179 157 __func__, format.format.width, format.format.height, 180 - format.format.code, i); 158 + format.format.code, bru_name, i); 181 159 182 160 format.pad = RWPF_PAD_SINK; 183 - ret = v4l2_subdev_call(&vsp1->wpf[0]->entity.subdev, pad, set_fmt, NULL, 161 + ret = v4l2_subdev_call(&pipe->output->entity.subdev, pad, set_fmt, NULL, 184 162 &format); 185 163 if (ret < 0) 186 164 return ret; 187 165 188 - dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on WPF0 sink\n", 166 + dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on WPF%u sink\n", 189 167 __func__, format.format.width, format.format.height, 190 - format.format.code); 168 + format.format.code, pipe->output->entity.index); 191 169 192 170 format.pad = RWPF_PAD_SOURCE; 193 - ret = v4l2_subdev_call(&vsp1->wpf[0]->entity.subdev, pad, get_fmt, NULL, 171 + ret = v4l2_subdev_call(&pipe->output->entity.subdev, pad, get_fmt, NULL, 194 172 &format); 195 173 if (ret < 0) 196 174 return ret; 197 175 198 - dev_dbg(vsp1->dev, "%s: got format %ux%u (%x) on WPF0 source\n", 176 + dev_dbg(vsp1->dev, "%s: got format %ux%u (%x) on WPF%u source\n", 199 177 __func__, format.format.width, format.format.height, 200 - format.format.code); 178 + format.format.code, pipe->output->entity.index); 201 179 202 180 format.pad = LIF_PAD_SINK; 203 - ret = v4l2_subdev_call(&vsp1->lif->entity.subdev, pad, set_fmt, NULL, 181 + ret = v4l2_subdev_call(&pipe->lif->subdev, pad, set_fmt, NULL, 204 182 &format); 205 183 if (ret < 0) 206 184 return ret; 207 185 208 - dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on LIF sink\n", 186 + dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on LIF%u sink\n", 209 187 __func__, format.format.width, format.format.height, 210 - format.format.code); 188 
+ format.format.code, pipe_index); 211 189 212 190 /* 213 191 * Verify that the format at the output of the pipeline matches the ··· 235 213 * Register a callback to allow us to notify the DRM driver of frame 236 214 * completion events. 237 215 */ 238 - vsp1->drm->du_complete = cfg->callback; 239 - vsp1->drm->du_private = cfg->callback_data; 216 + drm_pipe->du_complete = cfg->callback; 217 + drm_pipe->du_private = cfg->callback_data; 240 218 241 219 ret = media_pipeline_start(&pipe->output->entity.subdev.entity, 242 220 &pipe->pipe); ··· 245 223 vsp1_device_put(vsp1); 246 224 return ret; 247 225 } 226 + 227 + /* Disable the display interrupts. */ 228 + vsp1_write(vsp1, VI6_DISP_IRQ_STA, 0); 229 + vsp1_write(vsp1, VI6_DISP_IRQ_ENB, 0); 248 230 249 231 dev_dbg(vsp1->dev, "%s: pipeline enabled\n", __func__); 250 232 ··· 259 233 /** 260 234 * vsp1_du_atomic_begin - Prepare for an atomic update 261 235 * @dev: the VSP device 236 + * @pipe_index: the DRM pipeline index 262 237 */ 263 - void vsp1_du_atomic_begin(struct device *dev) 238 + void vsp1_du_atomic_begin(struct device *dev, unsigned int pipe_index) 264 239 { 265 240 struct vsp1_device *vsp1 = dev_get_drvdata(dev); 266 - struct vsp1_pipeline *pipe = &vsp1->drm->pipe; 241 + struct vsp1_drm_pipeline *drm_pipe = &vsp1->drm->pipe[pipe_index]; 267 242 268 - vsp1->drm->num_inputs = pipe->num_inputs; 243 + drm_pipe->enabled = drm_pipe->pipe.num_inputs != 0; 269 244 } 270 245 EXPORT_SYMBOL_GPL(vsp1_du_atomic_begin); 271 246 272 247 /** 273 248 * vsp1_du_atomic_update - Setup one RPF input of the VSP pipeline 274 249 * @dev: the VSP device 250 + * @pipe_index: the DRM pipeline index 275 251 * @rpf_index: index of the RPF to setup (0-based) 276 252 * @cfg: the RPF configuration 277 253 * ··· 300 272 * 301 273 * Return 0 on success or a negative error code on failure. 
302 274 */ 303 - int vsp1_du_atomic_update(struct device *dev, unsigned int rpf_index, 275 + int vsp1_du_atomic_update(struct device *dev, unsigned int pipe_index, 276 + unsigned int rpf_index, 304 277 const struct vsp1_du_atomic_config *cfg) 305 278 { 306 279 struct vsp1_device *vsp1 = dev_get_drvdata(dev); 280 + struct vsp1_drm_pipeline *drm_pipe = &vsp1->drm->pipe[pipe_index]; 307 281 const struct vsp1_format_info *fmtinfo; 308 282 struct vsp1_rwpf *rpf; 309 283 ··· 318 288 dev_dbg(vsp1->dev, "%s: RPF%u: disable requested\n", __func__, 319 289 rpf_index); 320 290 321 - vsp1->drm->inputs[rpf_index].enabled = false; 291 + /* 292 + * Remove the RPF from the pipe's inputs. The atomic flush 293 + * handler will disable the input and remove the entity from the 294 + * pipe's entities list. 295 + */ 296 + drm_pipe->pipe.inputs[rpf_index] = NULL; 322 297 return 0; 323 298 } 324 299 ··· 359 324 vsp1->drm->inputs[rpf_index].crop = cfg->src; 360 325 vsp1->drm->inputs[rpf_index].compose = cfg->dst; 361 326 vsp1->drm->inputs[rpf_index].zpos = cfg->zpos; 362 - vsp1->drm->inputs[rpf_index].enabled = true; 327 + 328 + drm_pipe->pipe.inputs[rpf_index] = rpf; 363 329 364 330 return 0; 365 331 } 366 332 EXPORT_SYMBOL_GPL(vsp1_du_atomic_update); 367 333 368 334 static int vsp1_du_setup_rpf_pipe(struct vsp1_device *vsp1, 335 + struct vsp1_pipeline *pipe, 369 336 struct vsp1_rwpf *rpf, unsigned int bru_input) 370 337 { 371 338 struct v4l2_subdev_selection sel; ··· 441 404 /* BRU sink, propagate the format from the RPF source. 
*/ 442 405 format.pad = bru_input; 443 406 444 - ret = v4l2_subdev_call(&vsp1->bru->entity.subdev, pad, set_fmt, NULL, 407 + ret = v4l2_subdev_call(&pipe->bru->subdev, pad, set_fmt, NULL, 445 408 &format); 446 409 if (ret < 0) 447 410 return ret; ··· 454 417 sel.target = V4L2_SEL_TGT_COMPOSE; 455 418 sel.r = vsp1->drm->inputs[rpf->entity.index].compose; 456 419 457 - ret = v4l2_subdev_call(&vsp1->bru->entity.subdev, pad, set_selection, 458 - NULL, &sel); 420 + ret = v4l2_subdev_call(&pipe->bru->subdev, pad, set_selection, NULL, 421 + &sel); 459 422 if (ret < 0) 460 423 return ret; 461 424 ··· 475 438 /** 476 439 * vsp1_du_atomic_flush - Commit an atomic update 477 440 * @dev: the VSP device 441 + * @pipe_index: the DRM pipeline index 478 442 */ 479 - void vsp1_du_atomic_flush(struct device *dev) 443 + void vsp1_du_atomic_flush(struct device *dev, unsigned int pipe_index) 480 444 { 481 445 struct vsp1_device *vsp1 = dev_get_drvdata(dev); 482 - struct vsp1_pipeline *pipe = &vsp1->drm->pipe; 446 + struct vsp1_drm_pipeline *drm_pipe = &vsp1->drm->pipe[pipe_index]; 447 + struct vsp1_pipeline *pipe = &drm_pipe->pipe; 483 448 struct vsp1_rwpf *inputs[VSP1_MAX_RPF] = { NULL, }; 449 + struct vsp1_bru *bru = to_bru(&pipe->bru->subdev); 484 450 struct vsp1_entity *entity; 451 + struct vsp1_entity *next; 485 452 struct vsp1_dl_list *dl; 453 + const char *bru_name; 486 454 unsigned long flags; 487 455 unsigned int i; 488 456 int ret; 457 + 458 + bru_name = pipe->bru->type == VSP1_ENTITY_BRU ? "BRU" : "BRS"; 489 459 490 460 /* Prepare the display list. */ 491 461 dl = vsp1_dl_list_get(pipe->output->dlm); ··· 504 460 struct vsp1_rwpf *rpf = vsp1->rpf[i]; 505 461 unsigned int j; 506 462 507 - if (!vsp1->drm->inputs[i].enabled) { 508 - pipe->inputs[i] = NULL; 463 + if (!pipe->inputs[i]) 509 464 continue; 510 - } 511 - 512 - pipe->inputs[i] = rpf; 513 465 514 466 /* Insert the RPF in the sorted RPFs array. 
*/ 515 467 for (j = pipe->num_inputs++; j > 0; --j) { ··· 518 478 } 519 479 520 480 /* Setup the RPF input pipeline for every enabled input. */ 521 - for (i = 0; i < vsp1->info->num_bru_inputs; ++i) { 481 + for (i = 0; i < pipe->bru->source_pad; ++i) { 522 482 struct vsp1_rwpf *rpf = inputs[i]; 523 483 524 484 if (!rpf) { 525 - vsp1->bru->inputs[i].rpf = NULL; 485 + bru->inputs[i].rpf = NULL; 526 486 continue; 527 487 } 528 488 529 - vsp1->bru->inputs[i].rpf = rpf; 489 + if (list_empty(&rpf->entity.list_pipe)) 490 + list_add_tail(&rpf->entity.list_pipe, &pipe->entities); 491 + 492 + bru->inputs[i].rpf = rpf; 530 493 rpf->bru_input = i; 494 + rpf->entity.sink = pipe->bru; 531 495 rpf->entity.sink_pad = i; 532 496 533 - dev_dbg(vsp1->dev, "%s: connecting RPF.%u to BRU:%u\n", 534 - __func__, rpf->entity.index, i); 497 + dev_dbg(vsp1->dev, "%s: connecting RPF.%u to %s:%u\n", 498 + __func__, rpf->entity.index, bru_name, i); 535 499 536 - ret = vsp1_du_setup_rpf_pipe(vsp1, rpf, i); 500 + ret = vsp1_du_setup_rpf_pipe(vsp1, pipe, rpf, i); 537 501 if (ret < 0) 538 502 dev_err(vsp1->dev, 539 503 "%s: failed to setup RPF.%u\n", ··· 545 501 } 546 502 547 503 /* Configure all entities in the pipeline. */ 548 - list_for_each_entry(entity, &pipe->entities, list_pipe) { 504 + list_for_each_entry_safe(entity, next, &pipe->entities, list_pipe) { 549 505 /* Disconnect unused RPFs from the pipeline. 
*/ 550 - if (entity->type == VSP1_ENTITY_RPF) { 551 - struct vsp1_rwpf *rpf = to_rwpf(&entity->subdev); 506 + if (entity->type == VSP1_ENTITY_RPF && 507 + !pipe->inputs[entity->index]) { 508 + vsp1_dl_list_write(dl, entity->route->reg, 509 + VI6_DPR_NODE_UNUSED); 552 510 553 - if (!pipe->inputs[rpf->entity.index]) { 554 - vsp1_dl_list_write(dl, entity->route->reg, 555 - VI6_DPR_NODE_UNUSED); 556 - continue; 557 - } 511 + list_del_init(&entity->list_pipe); 512 + 513 + continue; 558 514 } 559 515 560 516 vsp1_entity_route_setup(entity, pipe, dl); ··· 572 528 vsp1_dl_list_commit(dl); 573 529 574 530 /* Start or stop the pipeline if needed. */ 575 - if (!vsp1->drm->num_inputs && pipe->num_inputs) { 576 - vsp1_write(vsp1, VI6_DISP_IRQ_STA, 0); 577 - vsp1_write(vsp1, VI6_DISP_IRQ_ENB, VI6_DISP_IRQ_ENB_DSTE); 531 + if (!drm_pipe->enabled && pipe->num_inputs) { 578 532 spin_lock_irqsave(&pipe->irqlock, flags); 579 533 vsp1_pipeline_run(pipe); 580 534 spin_unlock_irqrestore(&pipe->irqlock, flags); 581 - } else if (vsp1->drm->num_inputs && !pipe->num_inputs) { 582 - vsp1_write(vsp1, VI6_DISP_IRQ_ENB, 0); 535 + } else if (drm_pipe->enabled && !pipe->num_inputs) { 583 536 vsp1_pipeline_stop(pipe); 584 537 } 585 538 } ··· 609 568 * Initialization 610 569 */ 611 570 612 - int vsp1_drm_create_links(struct vsp1_device *vsp1) 613 - { 614 - const u32 flags = MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE; 615 - unsigned int i; 616 - int ret; 617 - 618 - /* 619 - * VSPD instances require a BRU to perform composition and a LIF to 620 - * output to the DU. 
621 - */ 622 - if (!vsp1->bru || !vsp1->lif) 623 - return -ENXIO; 624 - 625 - for (i = 0; i < vsp1->info->rpf_count; ++i) { 626 - struct vsp1_rwpf *rpf = vsp1->rpf[i]; 627 - 628 - ret = media_create_pad_link(&rpf->entity.subdev.entity, 629 - RWPF_PAD_SOURCE, 630 - &vsp1->bru->entity.subdev.entity, 631 - i, flags); 632 - if (ret < 0) 633 - return ret; 634 - 635 - rpf->entity.sink = &vsp1->bru->entity.subdev.entity; 636 - rpf->entity.sink_pad = i; 637 - } 638 - 639 - ret = media_create_pad_link(&vsp1->bru->entity.subdev.entity, 640 - vsp1->bru->entity.source_pad, 641 - &vsp1->wpf[0]->entity.subdev.entity, 642 - RWPF_PAD_SINK, flags); 643 - if (ret < 0) 644 - return ret; 645 - 646 - vsp1->bru->entity.sink = &vsp1->wpf[0]->entity.subdev.entity; 647 - vsp1->bru->entity.sink_pad = RWPF_PAD_SINK; 648 - 649 - ret = media_create_pad_link(&vsp1->wpf[0]->entity.subdev.entity, 650 - RWPF_PAD_SOURCE, 651 - &vsp1->lif->entity.subdev.entity, 652 - LIF_PAD_SINK, flags); 653 - if (ret < 0) 654 - return ret; 655 - 656 - return 0; 657 - } 658 - 659 571 int vsp1_drm_init(struct vsp1_device *vsp1) 660 572 { 661 - struct vsp1_pipeline *pipe; 662 573 unsigned int i; 663 574 664 575 vsp1->drm = devm_kzalloc(vsp1->dev, sizeof(*vsp1->drm), GFP_KERNEL); 665 576 if (!vsp1->drm) 666 577 return -ENOMEM; 667 578 668 - pipe = &vsp1->drm->pipe; 579 + /* Create one DRM pipeline per LIF. */ 580 + for (i = 0; i < vsp1->info->lif_count; ++i) { 581 + struct vsp1_drm_pipeline *drm_pipe = &vsp1->drm->pipe[i]; 582 + struct vsp1_pipeline *pipe = &drm_pipe->pipe; 669 583 670 - vsp1_pipeline_init(pipe); 584 + vsp1_pipeline_init(pipe); 671 585 672 - /* The DRM pipeline is static, add entities manually. */ 586 + /* 587 + * The DRM pipeline is static, add entities manually. The first 588 + * pipeline uses the BRU and the second pipeline the BRS. 589 + */ 590 + pipe->bru = i == 0 ? 
&vsp1->bru->entity : &vsp1->brs->entity; 591 + pipe->lif = &vsp1->lif[i]->entity; 592 + pipe->output = vsp1->wpf[i]; 593 + pipe->output->pipe = pipe; 594 + pipe->frame_end = vsp1_du_pipeline_frame_end; 595 + 596 + pipe->bru->sink = &pipe->output->entity; 597 + pipe->bru->sink_pad = 0; 598 + pipe->output->entity.sink = pipe->lif; 599 + pipe->output->entity.sink_pad = 0; 600 + 601 + list_add_tail(&pipe->bru->list_pipe, &pipe->entities); 602 + list_add_tail(&pipe->lif->list_pipe, &pipe->entities); 603 + list_add_tail(&pipe->output->entity.list_pipe, &pipe->entities); 604 + } 605 + 606 + /* Disable all RPFs initially. */ 673 607 for (i = 0; i < vsp1->info->rpf_count; ++i) { 674 608 struct vsp1_rwpf *input = vsp1->rpf[i]; 675 609 676 - list_add_tail(&input->entity.list_pipe, &pipe->entities); 610 + INIT_LIST_HEAD(&input->entity.list_pipe); 677 611 } 678 - 679 - list_add_tail(&vsp1->bru->entity.list_pipe, &pipe->entities); 680 - list_add_tail(&vsp1->wpf[0]->entity.list_pipe, &pipe->entities); 681 - list_add_tail(&vsp1->lif->entity.list_pipe, &pipe->entities); 682 - 683 - pipe->bru = &vsp1->bru->entity; 684 - pipe->lif = &vsp1->lif->entity; 685 - pipe->output = vsp1->wpf[0]; 686 - pipe->output->pipe = pipe; 687 - pipe->frame_end = vsp1_du_pipeline_frame_end; 688 612 689 613 return 0; 690 614 }
+22 -16
drivers/media/platform/vsp1/vsp1_drm.h
··· 18 18 #include "vsp1_pipe.h" 19 19 20 20 /** 21 - * vsp1_drm - State for the API exposed to the DRM driver 21 + * vsp1_drm_pipeline - State for the API exposed to the DRM driver 22 22 * @pipe: the VSP1 pipeline used for display 23 - * @num_inputs: number of active pipeline inputs at the beginning of an update 24 - * @inputs: source crop rectangle, destination compose rectangle and z-order 25 - * position for every input 23 + * @enabled: pipeline state at the beginning of an update 26 24 * @du_complete: frame completion callback for the DU driver (optional) 27 25 * @du_private: data to be passed to the du_complete callback 28 26 */ 29 - struct vsp1_drm { 27 + struct vsp1_drm_pipeline { 30 28 struct vsp1_pipeline pipe; 31 - unsigned int num_inputs; 29 + bool enabled; 30 + 31 + /* Frame synchronisation */ 32 + void (*du_complete)(void *, bool); 33 + void *du_private; 34 + }; 35 + 36 + /** 37 + * vsp1_drm - State for the API exposed to the DRM driver 38 + * @pipe: the VSP1 DRM pipeline used for display 39 + * @inputs: source crop rectangle, destination compose rectangle and z-order 40 + * position for every input (indexed by RPF index) 41 + */ 42 + struct vsp1_drm { 43 + struct vsp1_drm_pipeline pipe[VSP1_MAX_LIF]; 44 + 32 45 struct { 33 - bool enabled; 34 46 struct v4l2_rect crop; 35 47 struct v4l2_rect compose; 36 48 unsigned int zpos; 37 49 } inputs[VSP1_MAX_RPF]; 38 - 39 - /* Frame synchronisation */ 40 - void (*du_complete)(void *); 41 - void *du_private; 42 50 }; 43 51 44 - static inline struct vsp1_drm *to_vsp1_drm(struct vsp1_pipeline *pipe) 52 + static inline struct vsp1_drm_pipeline * 53 + to_vsp1_drm_pipeline(struct vsp1_pipeline *pipe) 45 54 { 46 - return container_of(pipe, struct vsp1_drm, pipe); 55 + return container_of(pipe, struct vsp1_drm_pipeline, pipe); 47 56 } 48 57 49 58 int vsp1_drm_init(struct vsp1_device *vsp1); 50 59 void vsp1_drm_cleanup(struct vsp1_device *vsp1); 51 - int vsp1_drm_create_links(struct vsp1_device *vsp1); 52 - 53 - void 
vsp1_drm_display_start(struct vsp1_device *vsp1); 54 60 55 61 #endif /* __VSP1_DRM_H__ */
+77 -38
drivers/media/platform/vsp1/vsp1_drv.c
··· 68 68 } 69 69 } 70 70 71 - status = vsp1_read(vsp1, VI6_DISP_IRQ_STA); 72 - vsp1_write(vsp1, VI6_DISP_IRQ_STA, ~status & VI6_DISP_IRQ_STA_DST); 73 - 74 - if (status & VI6_DISP_IRQ_STA_DST) { 75 - vsp1_drm_display_start(vsp1); 76 - ret = IRQ_HANDLED; 77 - } 78 - 79 71 return ret; 80 72 } 81 73 ··· 84 92 * 85 93 * - from a UDS to a UDS (UDS entities can't be chained) 86 94 * - from an entity to itself (no loops are allowed) 95 + * 96 + * Furthermore, the BRS can't be connected to histogram generators, but no 97 + * special check is currently needed as all VSP instances that include a BRS 98 + * have no histogram generator. 87 99 */ 88 100 static int vsp1_create_sink_links(struct vsp1_device *vsp1, 89 101 struct vsp1_entity *sink) ··· 125 129 return ret; 126 130 127 131 if (flags & MEDIA_LNK_FL_ENABLED) 128 - source->sink = entity; 132 + source->sink = sink; 129 133 } 130 134 } 131 135 ··· 168 172 return ret; 169 173 } 170 174 171 - if (vsp1->lif) { 172 - ret = media_create_pad_link(&vsp1->wpf[0]->entity.subdev.entity, 175 + for (i = 0; i < vsp1->info->lif_count; ++i) { 176 + if (!vsp1->lif[i]) 177 + continue; 178 + 179 + ret = media_create_pad_link(&vsp1->wpf[i]->entity.subdev.entity, 173 180 RWPF_PAD_SOURCE, 174 - &vsp1->lif->entity.subdev.entity, 181 + &vsp1->lif[i]->entity.subdev.entity, 175 182 LIF_PAD_SINK, 0); 176 183 if (ret < 0) 177 184 return ret; ··· 268 269 } 269 270 270 271 /* Instantiate all the entities. 
*/ 272 + if (vsp1->info->features & VSP1_HAS_BRS) { 273 + vsp1->brs = vsp1_bru_create(vsp1, VSP1_ENTITY_BRS); 274 + if (IS_ERR(vsp1->brs)) { 275 + ret = PTR_ERR(vsp1->brs); 276 + goto done; 277 + } 278 + 279 + list_add_tail(&vsp1->brs->entity.list_dev, &vsp1->entities); 280 + } 281 + 271 282 if (vsp1->info->features & VSP1_HAS_BRU) { 272 - vsp1->bru = vsp1_bru_create(vsp1); 283 + vsp1->bru = vsp1_bru_create(vsp1, VSP1_ENTITY_BRU); 273 284 if (IS_ERR(vsp1->bru)) { 274 285 ret = PTR_ERR(vsp1->bru); 275 286 goto done; ··· 337 328 } 338 329 339 330 /* 340 - * The LIF is only supported when used in conjunction with the DU, in 331 + * The LIFs are only supported when used in conjunction with the DU, in 341 332 * which case the userspace API is disabled. If the userspace API is 342 - * enabled skip the LIF, even when present. 333 + * enabled skip the LIFs, even when present. 343 334 */ 344 - if (vsp1->info->features & VSP1_HAS_LIF && !vsp1->info->uapi) { 345 - vsp1->lif = vsp1_lif_create(vsp1); 346 - if (IS_ERR(vsp1->lif)) { 347 - ret = PTR_ERR(vsp1->lif); 348 - goto done; 349 - } 335 + if (!vsp1->info->uapi) { 336 + for (i = 0; i < vsp1->info->lif_count; ++i) { 337 + struct vsp1_lif *lif; 350 338 351 - list_add_tail(&vsp1->lif->entity.list_dev, &vsp1->entities); 339 + lif = vsp1_lif_create(vsp1, i); 340 + if (IS_ERR(lif)) { 341 + ret = PTR_ERR(lif); 342 + goto done; 343 + } 344 + 345 + vsp1->lif[i] = lif; 346 + list_add_tail(&lif->entity.list_dev, &vsp1->entities); 347 + } 352 348 } 353 349 354 350 if (vsp1->info->features & VSP1_HAS_LUT) { ··· 434 420 } 435 421 436 422 list_add_tail(&video->list, &vsp1->videos); 437 - wpf->entity.sink = &video->video.entity; 438 423 } 439 424 } 440 425 ··· 445 432 goto done; 446 433 } 447 434 448 - /* Create links. 
*/ 449 - if (vsp1->info->uapi) 450 - ret = vsp1_uapi_create_links(vsp1); 451 - else 452 - ret = vsp1_drm_create_links(vsp1); 453 - if (ret < 0) 454 - goto done; 455 - 456 435 /* 457 - * Register subdev nodes if the userspace API is enabled or initialize 458 - * the DRM pipeline otherwise. 436 + * Create links and register subdev nodes if the userspace API is 437 + * enabled or initialize the DRM pipeline otherwise. 459 438 */ 460 439 if (vsp1->info->uapi) { 440 + ret = vsp1_uapi_create_links(vsp1); 441 + if (ret < 0) 442 + goto done; 443 + 461 444 ret = v4l2_device_register_subdev_nodes(&vsp1->v4l2_dev); 462 445 if (ret < 0) 463 446 goto done; ··· 523 514 vsp1_write(vsp1, VI6_DPR_HST_ROUTE, VI6_DPR_NODE_UNUSED); 524 515 vsp1_write(vsp1, VI6_DPR_HSI_ROUTE, VI6_DPR_NODE_UNUSED); 525 516 vsp1_write(vsp1, VI6_DPR_BRU_ROUTE, VI6_DPR_NODE_UNUSED); 517 + 518 + if (vsp1->info->features & VSP1_HAS_BRS) 519 + vsp1_write(vsp1, VI6_DPR_ILV_BRS_ROUTE, VI6_DPR_NODE_UNUSED); 526 520 527 521 vsp1_write(vsp1, VI6_DPR_HGO_SMPPT, (7 << VI6_DPR_SMPPT_TGW_SHIFT) | 528 522 (VI6_DPR_NODE_UNUSED << VI6_DPR_SMPPT_PT_SHIFT)); ··· 646 634 .version = VI6_IP_VERSION_MODEL_VSPD_GEN2, 647 635 .model = "VSP1-D", 648 636 .gen = 2, 649 - .features = VSP1_HAS_BRU | VSP1_HAS_HGO | VSP1_HAS_LIF 650 - | VSP1_HAS_LUT, 637 + .features = VSP1_HAS_BRU | VSP1_HAS_HGO | VSP1_HAS_LUT, 638 + .lif_count = 1, 651 639 .rpf_count = 4, 652 640 .uds_count = 1, 653 641 .wpf_count = 1, ··· 680 668 .version = VI6_IP_VERSION_MODEL_VSPD_V2H, 681 669 .model = "VSP1V-D", 682 670 .gen = 2, 683 - .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_LUT 684 - | VSP1_HAS_LIF, 671 + .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_LUT, 672 + .lif_count = 1, 685 673 .rpf_count = 4, 686 674 .uds_count = 1, 687 675 .wpf_count = 1, ··· 718 706 .num_bru_inputs = 5, 719 707 .uapi = true, 720 708 }, { 709 + .version = VI6_IP_VERSION_MODEL_VSPBS_GEN3, 710 + .model = "VSP2-BS", 711 + .gen = 3, 712 + .features = VSP1_HAS_BRS | 
VSP1_HAS_WPF_VFLIP, 713 + .rpf_count = 2, 714 + .wpf_count = 1, 715 + .uapi = true, 716 + }, { 721 717 .version = VI6_IP_VERSION_MODEL_VSPD_GEN3, 722 718 .model = "VSP2-D", 723 719 .gen = 3, 724 - .features = VSP1_HAS_BRU | VSP1_HAS_LIF | VSP1_HAS_WPF_VFLIP, 720 + .features = VSP1_HAS_BRU | VSP1_HAS_WPF_VFLIP, 721 + .lif_count = 1, 722 + .rpf_count = 5, 723 + .wpf_count = 2, 724 + .num_bru_inputs = 5, 725 + }, { 726 + .version = VI6_IP_VERSION_MODEL_VSPD_V3, 727 + .model = "VSP2-D", 728 + .gen = 3, 729 + .features = VSP1_HAS_BRS | VSP1_HAS_BRU, 730 + .lif_count = 1, 731 + .rpf_count = 5, 732 + .wpf_count = 1, 733 + .num_bru_inputs = 5, 734 + }, { 735 + .version = VI6_IP_VERSION_MODEL_VSPDL_GEN3, 736 + .model = "VSP2-DL", 737 + .gen = 3, 738 + .features = VSP1_HAS_BRS | VSP1_HAS_BRU, 739 + .lif_count = 2, 725 740 .rpf_count = 5, 726 741 .wpf_count = 2, 727 742 .num_bru_inputs = 5,
+25 -15
drivers/media/platform/vsp1/vsp1_entity.c
··· 24 24 #include "vsp1_pipe.h" 25 25 #include "vsp1_rwpf.h" 26 26 27 - static inline struct vsp1_entity * 28 - media_entity_to_vsp1_entity(struct media_entity *entity) 29 - { 30 - return container_of(entity, struct vsp1_entity, subdev.entity); 31 - } 32 - 33 27 void vsp1_entity_route_setup(struct vsp1_entity *entity, 34 28 struct vsp1_pipeline *pipe, 35 29 struct vsp1_dl_list *dl) 36 30 { 37 31 struct vsp1_entity *source; 38 - struct vsp1_entity *sink; 32 + u32 route; 39 33 40 34 if (entity->type == VSP1_ENTITY_HGO) { 41 35 u32 smppt; ··· 38 44 * The HGO is a special case, its routing is configured on the 39 45 * sink pad. 40 46 */ 41 - source = media_entity_to_vsp1_entity(entity->sources[0]); 47 + source = entity->sources[0]; 42 48 smppt = (pipe->output->entity.index << VI6_DPR_SMPPT_TGW_SHIFT) 43 49 | (source->route->output << VI6_DPR_SMPPT_PT_SHIFT); 44 50 ··· 51 57 * The HGT is a special case, its routing is configured on the 52 58 * sink pad. 53 59 */ 54 - source = media_entity_to_vsp1_entity(entity->sources[0]); 60 + source = entity->sources[0]; 55 61 smppt = (pipe->output->entity.index << VI6_DPR_SMPPT_TGW_SHIFT) 56 62 | (source->route->output << VI6_DPR_SMPPT_PT_SHIFT); 57 63 ··· 63 69 if (source->route->reg == 0) 64 70 return; 65 71 66 - sink = media_entity_to_vsp1_entity(source->sink); 67 - vsp1_dl_list_write(dl, source->route->reg, 68 - sink->route->inputs[source->sink_pad]); 72 + route = source->sink->route->inputs[source->sink_pad]; 73 + /* 74 + * The ILV and BRS share the same data path route. The extra BRSSEL bit 75 + * selects between the ILV and BRS. 
76 + */ 77 + if (source->type == VSP1_ENTITY_BRS) 78 + route |= VI6_DPR_ROUTE_BRSSEL; 79 + vsp1_dl_list_write(dl, source->route->reg, route); 69 80 } 70 81 71 82 /* ----------------------------------------------------------------------------- ··· 315 316 * Media Operations 316 317 */ 317 318 319 + static inline struct vsp1_entity * 320 + media_entity_to_vsp1_entity(struct media_entity *entity) 321 + { 322 + return container_of(entity, struct vsp1_entity, subdev.entity); 323 + } 324 + 318 325 static int vsp1_entity_link_setup_source(const struct media_pad *source_pad, 319 326 const struct media_pad *sink_pad, 320 327 u32 flags) ··· 344 339 sink->type != VSP1_ENTITY_HGT) { 345 340 if (source->sink) 346 341 return -EBUSY; 347 - source->sink = sink_pad->entity; 342 + source->sink = sink; 348 343 source->sink_pad = sink_pad->index; 349 344 } 350 345 } else { ··· 360 355 u32 flags) 361 356 { 362 357 struct vsp1_entity *sink; 358 + struct vsp1_entity *source; 363 359 364 360 sink = media_entity_to_vsp1_entity(sink_pad->entity); 361 + source = media_entity_to_vsp1_entity(source_pad->entity); 365 362 366 363 if (flags & MEDIA_LNK_FL_ENABLED) { 367 364 /* Fan-in is limited to one. 
*/ 368 365 if (sink->sources[sink_pad->index]) 369 366 return -EBUSY; 370 367 371 - sink->sources[sink_pad->index] = source_pad->entity; 368 + sink->sources[sink_pad->index] = source; 372 369 } else { 373 370 sink->sources[sink_pad->index] = NULL; 374 371 } ··· 457 450 { VI6_DPR_NODE_WPF(idx) }, VI6_DPR_NODE_WPF(idx) } 458 451 459 452 static const struct vsp1_route vsp1_routes[] = { 453 + { VSP1_ENTITY_BRS, 0, VI6_DPR_ILV_BRS_ROUTE, 454 + { VI6_DPR_NODE_BRS_IN(0), VI6_DPR_NODE_BRS_IN(1) }, 0 }, 460 455 { VSP1_ENTITY_BRU, 0, VI6_DPR_BRU_ROUTE, 461 456 { VI6_DPR_NODE_BRU_IN(0), VI6_DPR_NODE_BRU_IN(1), 462 457 VI6_DPR_NODE_BRU_IN(2), VI6_DPR_NODE_BRU_IN(3), ··· 468 459 { VSP1_ENTITY_HGT, 0, 0, { 0, }, 0 }, 469 460 VSP1_ENTITY_ROUTE(HSI), 470 461 VSP1_ENTITY_ROUTE(HST), 471 - { VSP1_ENTITY_LIF, 0, 0, { VI6_DPR_NODE_LIF, }, VI6_DPR_NODE_LIF }, 462 + { VSP1_ENTITY_LIF, 0, 0, { 0, }, 0 }, 463 + { VSP1_ENTITY_LIF, 1, 0, { 0, }, 0 }, 472 464 VSP1_ENTITY_ROUTE(LUT), 473 465 VSP1_ENTITY_ROUTE_RPF(0), 474 466 VSP1_ENTITY_ROUTE_RPF(1),
+3 -2
drivers/media/platform/vsp1/vsp1_entity.h
··· 23 23 struct vsp1_pipeline; 24 24 25 25 enum vsp1_entity_type { 26 + VSP1_ENTITY_BRS, 26 27 VSP1_ENTITY_BRU, 27 28 VSP1_ENTITY_CLU, 28 29 VSP1_ENTITY_HGO, ··· 105 104 struct media_pad *pads; 106 105 unsigned int source_pad; 107 106 108 - struct media_entity **sources; 109 - struct media_entity *sink; 107 + struct vsp1_entity **sources; 108 + struct vsp1_entity *sink; 110 109 unsigned int sink_pad; 111 110 112 111 struct v4l2_subdev subdev;
+3 -2
drivers/media/platform/vsp1/vsp1_lif.c
··· 30 30 static inline void vsp1_lif_write(struct vsp1_lif *lif, struct vsp1_dl_list *dl, 31 31 u32 reg, u32 data) 32 32 { 33 - vsp1_dl_list_write(dl, reg, data); 33 + vsp1_dl_list_write(dl, reg + lif->entity.index * VI6_LIF_OFFSET, data); 34 34 } 35 35 36 36 /* ----------------------------------------------------------------------------- ··· 165 165 * Initialization and Cleanup 166 166 */ 167 167 168 - struct vsp1_lif *vsp1_lif_create(struct vsp1_device *vsp1) 168 + struct vsp1_lif *vsp1_lif_create(struct vsp1_device *vsp1, unsigned int index) 169 169 { 170 170 struct vsp1_lif *lif; 171 171 int ret; ··· 176 176 177 177 lif->entity.ops = &lif_entity_ops; 178 178 lif->entity.type = VSP1_ENTITY_LIF; 179 + lif->entity.index = index; 179 180 180 181 /* 181 182 * The LIF is never exposed to userspace, but media entity registration
+1 -1
drivers/media/platform/vsp1/vsp1_lif.h
··· 32 32 return container_of(subdev, struct vsp1_lif, entity.subdev); 33 33 } 34 34 35 - struct vsp1_lif *vsp1_lif_create(struct vsp1_device *vsp1); 35 + struct vsp1_lif *vsp1_lif_create(struct vsp1_device *vsp1, unsigned int index); 36 36 37 37 #endif /* __VSP1_LIF_H__ */
+14 -13
drivers/media/platform/vsp1/vsp1_pipe.c
··· 335 335 if (pipe == NULL) 336 336 return; 337 337 338 + /* 339 + * If the DL commit raced with the frame end interrupt, the commit ends 340 + * up being postponed by one frame. @completed represents whether the 341 + * active frame was finished or postponed. 342 + */ 338 343 completed = vsp1_dlm_irq_frame_end(pipe->output->dlm); 339 - if (!completed) { 340 - /* 341 - * If the DL commit raced with the frame end interrupt, the 342 - * commit ends up being postponed by one frame. Return 343 - * immediately without calling the pipeline's frame end handler 344 - * or incrementing the sequence number. 345 - */ 346 - return; 347 - } 348 344 349 345 if (pipe->hgo) 350 346 vsp1_hgo_frame_end(pipe->hgo); ··· 348 352 if (pipe->hgt) 349 353 vsp1_hgt_frame_end(pipe->hgt); 350 354 355 + /* 356 + * Regardless of frame completion we still need to notify the pipe 357 + * frame_end to account for vblank events. 358 + */ 351 359 if (pipe->frame_end) 352 - pipe->frame_end(pipe); 360 + pipe->frame_end(pipe, completed); 353 361 354 362 pipe->sequence++; 355 363 } ··· 373 373 return; 374 374 375 375 /* 376 - * The BRU background color has a fixed alpha value set to 255, the 377 - * output alpha value is thus always equal to 255. 376 + * The BRU and BRS background color has a fixed alpha value set to 255, 377 + * the output alpha value is thus always equal to 255. 378 378 */ 379 - if (pipe->uds_input->type == VSP1_ENTITY_BRU) 379 + if (pipe->uds_input->type == VSP1_ENTITY_BRU || 380 + pipe->uds_input->type == VSP1_ENTITY_BRS) 380 381 alpha = 255; 381 382 382 383 vsp1_uds_set_alpha(pipe->uds, dl, alpha);
+1 -1
drivers/media/platform/vsp1/vsp1_pipe.h
··· 91 91 enum vsp1_pipeline_state state; 92 92 wait_queue_head_t wq; 93 93 94 - void (*frame_end)(struct vsp1_pipeline *pipe); 94 + void (*frame_end)(struct vsp1_pipeline *pipe, bool completed); 95 95 96 96 struct mutex lock; 97 97 struct kref kref;
+35 -11
drivers/media/platform/vsp1/vsp1_regs.h
··· 18 18 */ 19 19 20 20 #define VI6_CMD(n) (0x0000 + (n) * 4) 21 + #define VI6_CMD_UPDHDR (1 << 4) 21 22 #define VI6_CMD_STRCMD (1 << 0) 22 23 23 24 #define VI6_CLK_DCSWT 0x0018 ··· 239 238 #define VI6_WPF_SRCRPF_VIRACT_SUB (1 << 28) 240 239 #define VI6_WPF_SRCRPF_VIRACT_MST (2 << 28) 241 240 #define VI6_WPF_SRCRPF_VIRACT_MASK (3 << 28) 241 + #define VI6_WPF_SRCRPF_VIRACT2_DIS (0 << 24) 242 + #define VI6_WPF_SRCRPF_VIRACT2_SUB (1 << 24) 243 + #define VI6_WPF_SRCRPF_VIRACT2_MST (2 << 24) 244 + #define VI6_WPF_SRCRPF_VIRACT2_MASK (3 << 24) 242 245 #define VI6_WPF_SRCRPF_RPF_ACT_DIS(n) (0 << ((n) * 2)) 243 246 #define VI6_WPF_SRCRPF_RPF_ACT_SUB(n) (1 << ((n) * 2)) 244 247 #define VI6_WPF_SRCRPF_RPF_ACT_MST(n) (2 << ((n) * 2)) ··· 326 321 #define VI6_DPR_HST_ROUTE 0x2044 327 322 #define VI6_DPR_HSI_ROUTE 0x2048 328 323 #define VI6_DPR_BRU_ROUTE 0x204c 324 + #define VI6_DPR_ILV_BRS_ROUTE 0x2050 325 + #define VI6_DPR_ROUTE_BRSSEL (1 << 28) 329 326 #define VI6_DPR_ROUTE_FXA_MASK (0xff << 16) 330 327 #define VI6_DPR_ROUTE_FXA_SHIFT 16 331 328 #define VI6_DPR_ROUTE_FP_MASK (0x3f << 8) ··· 351 344 #define VI6_DPR_NODE_CLU 29 352 345 #define VI6_DPR_NODE_HST 30 353 346 #define VI6_DPR_NODE_HSI 31 354 - #define VI6_DPR_NODE_LIF 55 347 + #define VI6_DPR_NODE_BRS_IN(n) (38 + (n)) 348 + #define VI6_DPR_NODE_LIF 55 /* Gen2 only */ 355 349 #define VI6_DPR_NODE_WPF(n) (56 + (n)) 356 350 #define VI6_DPR_NODE_UNUSED 63 357 351 ··· 484 476 #define VI6_HSI_CTRL_EN (1 << 0) 485 477 486 478 /* ----------------------------------------------------------------------------- 487 - * BRU Control Registers 479 + * BRS and BRU Control Registers 488 480 */ 489 481 490 482 #define VI6_ROP_NOP 0 ··· 504 496 #define VI6_ROP_NAND 14 505 497 #define VI6_ROP_SET 15 506 498 507 - #define VI6_BRU_INCTRL 0x2c00 499 + #define VI6_BRU_BASE 0x2c00 500 + #define VI6_BRS_BASE 0x3900 501 + 502 + #define VI6_BRU_INCTRL 0x0000 508 503 #define VI6_BRU_INCTRL_NRM (1 << 28) 509 504 #define VI6_BRU_INCTRL_DnON (1 << 
(16 + (n))) 510 505 #define VI6_BRU_INCTRL_DITHn_OFF (0 << ((n) * 4)) ··· 519 508 #define VI6_BRU_INCTRL_DITHn_MASK (7 << ((n) * 4)) 520 509 #define VI6_BRU_INCTRL_DITHn_SHIFT ((n) * 4) 521 510 522 - #define VI6_BRU_VIRRPF_SIZE 0x2c04 511 + #define VI6_BRU_VIRRPF_SIZE 0x0004 523 512 #define VI6_BRU_VIRRPF_SIZE_HSIZE_MASK (0x1fff << 16) 524 513 #define VI6_BRU_VIRRPF_SIZE_HSIZE_SHIFT 16 525 514 #define VI6_BRU_VIRRPF_SIZE_VSIZE_MASK (0x1fff << 0) 526 515 #define VI6_BRU_VIRRPF_SIZE_VSIZE_SHIFT 0 527 516 528 - #define VI6_BRU_VIRRPF_LOC 0x2c08 517 + #define VI6_BRU_VIRRPF_LOC 0x0008 529 518 #define VI6_BRU_VIRRPF_LOC_HCOORD_MASK (0x1fff << 16) 530 519 #define VI6_BRU_VIRRPF_LOC_HCOORD_SHIFT 16 531 520 #define VI6_BRU_VIRRPF_LOC_VCOORD_MASK (0x1fff << 0) 532 521 #define VI6_BRU_VIRRPF_LOC_VCOORD_SHIFT 0 533 522 534 - #define VI6_BRU_VIRRPF_COL 0x2c0c 523 + #define VI6_BRU_VIRRPF_COL 0x000c 535 524 #define VI6_BRU_VIRRPF_COL_A_MASK (0xff << 24) 536 525 #define VI6_BRU_VIRRPF_COL_A_SHIFT 24 537 526 #define VI6_BRU_VIRRPF_COL_RCR_MASK (0xff << 16) ··· 541 530 #define VI6_BRU_VIRRPF_COL_BCB_MASK (0xff << 0) 542 531 #define VI6_BRU_VIRRPF_COL_BCB_SHIFT 0 543 532 544 - #define VI6_BRU_CTRL(n) (0x2c10 + (n) * 8 + ((n) <= 3 ? 0 : 4)) 533 + #define VI6_BRU_CTRL(n) (0x0010 + (n) * 8 + ((n) <= 3 ? 0 : 4)) 545 534 #define VI6_BRU_CTRL_RBC (1 << 31) 546 535 #define VI6_BRU_CTRL_DSTSEL_BRUIN(n) (((n) <= 3 ? (n) : (n)+1) << 20) 547 536 #define VI6_BRU_CTRL_DSTSEL_VRPF (4 << 20) ··· 554 543 #define VI6_BRU_CTRL_AROP(rop) ((rop) << 0) 555 544 #define VI6_BRU_CTRL_AROP_MASK (0xf << 0) 556 545 557 - #define VI6_BRU_BLD(n) (0x2c14 + (n) * 8 + ((n) <= 3 ? 0 : 4)) 546 + #define VI6_BRU_BLD(n) (0x0014 + (n) * 8 + ((n) <= 3 ? 
0 : 4)) 558 547 #define VI6_BRU_BLD_CBES (1 << 31) 559 548 #define VI6_BRU_BLD_CCMDX_DST_A (0 << 28) 560 549 #define VI6_BRU_BLD_CCMDX_255_DST_A (1 << 28) ··· 587 576 #define VI6_BRU_BLD_COEFY_MASK (0xff << 0) 588 577 #define VI6_BRU_BLD_COEFY_SHIFT 0 589 578 590 - #define VI6_BRU_ROP 0x2c30 579 + #define VI6_BRU_ROP 0x0030 /* Only available on BRU */ 591 580 #define VI6_BRU_ROP_DSTSEL_BRUIN(n) (((n) <= 3 ? (n) : (n)+1) << 20) 592 581 #define VI6_BRU_ROP_DSTSEL_VRPF (4 << 20) 593 582 #define VI6_BRU_ROP_DSTSEL_MASK (7 << 20) ··· 664 653 * LIF Control Registers 665 654 */ 666 655 656 + #define VI6_LIF_OFFSET (-0x100) 657 + 667 658 #define VI6_LIF_CTRL 0x3b00 668 659 #define VI6_LIF_CTRL_OBTH_MASK (0x7ff << 16) 669 660 #define VI6_LIF_CTRL_OBTH_SHIFT 16 ··· 702 689 #define VI6_IP_VERSION_MODEL_VSPBD_GEN3 (0x15 << 8) 703 690 #define VI6_IP_VERSION_MODEL_VSPBC_GEN3 (0x16 << 8) 704 691 #define VI6_IP_VERSION_MODEL_VSPD_GEN3 (0x17 << 8) 692 + #define VI6_IP_VERSION_MODEL_VSPD_V3 (0x18 << 8) 693 + #define VI6_IP_VERSION_MODEL_VSPDL_GEN3 (0x19 << 8) 694 + #define VI6_IP_VERSION_MODEL_VSPBS_GEN3 (0x1a << 8) 705 695 #define VI6_IP_VERSION_SOC_MASK (0xff << 0) 706 - #define VI6_IP_VERSION_SOC_H (0x01 << 0) 707 - #define VI6_IP_VERSION_SOC_M (0x02 << 0) 696 + #define VI6_IP_VERSION_SOC_H2 (0x01 << 0) 697 + #define VI6_IP_VERSION_SOC_V2H (0x01 << 0) 698 + #define VI6_IP_VERSION_SOC_V3M (0x01 << 0) 699 + #define VI6_IP_VERSION_SOC_M2 (0x02 << 0) 700 + #define VI6_IP_VERSION_SOC_M3W (0x02 << 0) 701 + #define VI6_IP_VERSION_SOC_V3H (0x02 << 0) 702 + #define VI6_IP_VERSION_SOC_H3 (0x03 << 0) 703 + #define VI6_IP_VERSION_SOC_D3 (0x04 << 0) 704 + #define VI6_IP_VERSION_SOC_M3N (0x04 << 0) 705 + #define VI6_IP_VERSION_SOC_E3 (0x04 << 0) 708 706 709 707 /* ----------------------------------------------------------------------------- 710 708 * RPF CLUT Registers
+45 -24
drivers/media/platform/vsp1/vsp1_video.c
··· 440 440 vsp1_pipeline_run(pipe); 441 441 } 442 442 443 - static void vsp1_video_pipeline_frame_end(struct vsp1_pipeline *pipe) 443 + static void vsp1_video_pipeline_frame_end(struct vsp1_pipeline *pipe, 444 + bool completed) 444 445 { 445 446 struct vsp1_device *vsp1 = pipe->output->entity.vsp1; 446 447 enum vsp1_pipeline_state state; 447 448 unsigned long flags; 448 449 unsigned int i; 450 + 451 + /* M2M Pipelines should never call here with an incomplete frame. */ 452 + WARN_ON_ONCE(!completed); 449 453 450 454 spin_lock_irqsave(&pipe->irqlock, flags); 451 455 ··· 485 481 struct media_entity_enum ent_enum; 486 482 struct vsp1_entity *entity; 487 483 struct media_pad *pad; 488 - bool bru_found = false; 484 + struct vsp1_bru *bru = NULL; 489 485 int ret; 490 486 491 487 ret = media_entity_enum_init(&ent_enum, &input->entity.vsp1->media_dev); ··· 515 511 media_entity_to_v4l2_subdev(pad->entity)); 516 512 517 513 /* 518 - * A BRU is present in the pipeline, store the BRU input pad 514 + * A BRU or BRS is present in the pipeline, store its input pad 519 515 * number in the input RPF for use when configuring the RPF. 520 516 */ 521 - if (entity->type == VSP1_ENTITY_BRU) { 522 - struct vsp1_bru *bru = to_bru(&entity->subdev); 517 + if (entity->type == VSP1_ENTITY_BRU || 518 + entity->type == VSP1_ENTITY_BRS) { 519 + /* BRU and BRS can't be chained. */ 520 + if (bru) { 521 + ret = -EPIPE; 522 + goto out; 523 + } 523 524 525 + bru = to_bru(&entity->subdev); 524 526 bru->inputs[pad->index].rpf = input; 525 527 input->bru_input = pad->index; 526 - 527 - bru_found = true; 528 528 } 529 529 530 530 /* We've reached the WPF, we're done. */ ··· 550 542 } 551 543 552 544 pipe->uds = entity; 553 - pipe->uds_input = bru_found ? pipe->bru 554 - : &input->entity; 545 + pipe->uds_input = bru ? &bru->entity : &input->entity; 555 546 } 556 547 557 548 /* Follow the source link, ignoring any HGO or HGT. 
*/ ··· 596 589 e = to_vsp1_entity(subdev); 597 590 list_add_tail(&e->list_pipe, &pipe->entities); 598 591 599 - if (e->type == VSP1_ENTITY_RPF) { 592 + switch (e->type) { 593 + case VSP1_ENTITY_RPF: 600 594 rwpf = to_rwpf(subdev); 601 595 pipe->inputs[rwpf->entity.index] = rwpf; 602 596 rwpf->video->pipe_index = ++pipe->num_inputs; 603 597 rwpf->pipe = pipe; 604 - } else if (e->type == VSP1_ENTITY_WPF) { 598 + break; 599 + 600 + case VSP1_ENTITY_WPF: 605 601 rwpf = to_rwpf(subdev); 606 602 pipe->output = rwpf; 607 603 rwpf->video->pipe_index = 0; 608 604 rwpf->pipe = pipe; 609 - } else if (e->type == VSP1_ENTITY_LIF) { 605 + break; 606 + 607 + case VSP1_ENTITY_LIF: 610 608 pipe->lif = e; 611 - } else if (e->type == VSP1_ENTITY_BRU) { 609 + break; 610 + 611 + case VSP1_ENTITY_BRU: 612 + case VSP1_ENTITY_BRS: 612 613 pipe->bru = e; 613 - } else if (e->type == VSP1_ENTITY_HGO) { 614 - struct vsp1_hgo *hgo = to_hgo(subdev); 614 + break; 615 615 616 + case VSP1_ENTITY_HGO: 616 617 pipe->hgo = e; 617 - hgo->histo.pipe = pipe; 618 - } else if (e->type == VSP1_ENTITY_HGT) { 619 - struct vsp1_hgt *hgt = to_hgt(subdev); 618 + to_hgo(subdev)->histo.pipe = pipe; 619 + break; 620 620 621 + case VSP1_ENTITY_HGT: 621 622 pipe->hgt = e; 622 - hgt->histo.pipe = pipe; 623 + to_hgt(subdev)->histo.pipe = pipe; 624 + break; 625 + 626 + default: 627 + break; 623 628 } 624 629 } 625 630 ··· 815 796 struct vsp1_uds *uds = to_uds(&pipe->uds->subdev); 816 797 817 798 /* 818 - * If a BRU is present in the pipeline before the UDS, the alpha 819 - * component doesn't need to be scaled as the BRU output alpha 820 - * value is fixed to 255. Otherwise we need to scale the alpha 821 - * component only when available at the input RPF. 799 + * If a BRU or BRS is present in the pipeline before the UDS, 800 + * the alpha component doesn't need to be scaled as the BRU and 801 + * BRS output alpha value is fixed to 255. 
Otherwise we need to 802 + * scale the alpha component only when available at the input 803 + * RPF. 822 804 */ 823 - if (pipe->uds_input->type == VSP1_ENTITY_BRU) { 805 + if (pipe->uds_input->type == VSP1_ENTITY_BRU || 806 + pipe->uds_input->type == VSP1_ENTITY_BRS) { 824 807 uds->scale_alpha = false; 825 808 } else { 826 809 struct vsp1_rwpf *rpf =
+3 -1
drivers/media/platform/vsp1/vsp1_wpf.c
··· 453 453 } 454 454 455 455 if (pipe->bru || pipe->num_inputs > 1) 456 - srcrpf |= VI6_WPF_SRCRPF_VIRACT_MST; 456 + srcrpf |= pipe->bru->type == VSP1_ENTITY_BRU 457 + ? VI6_WPF_SRCRPF_VIRACT_MST 458 + : VI6_WPF_SRCRPF_VIRACT2_MST; 457 459 458 460 vsp1_wpf_write(wpf, dl, VI6_WPF_SRCRPF, srcrpf); 459 461
+7 -5
include/media/vsp1.h
··· 34 34 unsigned int width; 35 35 unsigned int height; 36 36 37 - void (*callback)(void *); 37 + void (*callback)(void *, bool); 38 38 void *callback_data; 39 39 }; 40 40 41 - int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg); 41 + int vsp1_du_setup_lif(struct device *dev, unsigned int pipe_index, 42 + const struct vsp1_du_lif_config *cfg); 42 43 43 44 struct vsp1_du_atomic_config { 44 45 u32 pixelformat; ··· 51 50 unsigned int zpos; 52 51 }; 53 52 54 - void vsp1_du_atomic_begin(struct device *dev); 55 - int vsp1_du_atomic_update(struct device *dev, unsigned int rpf, 53 + void vsp1_du_atomic_begin(struct device *dev, unsigned int pipe_index); 54 + int vsp1_du_atomic_update(struct device *dev, unsigned int pipe_index, 55 + unsigned int rpf, 56 56 const struct vsp1_du_atomic_config *cfg); 57 - void vsp1_du_atomic_flush(struct device *dev); 57 + void vsp1_du_atomic_flush(struct device *dev, unsigned int pipe_index); 58 58 int vsp1_du_map_sg(struct device *dev, struct sg_table *sgt); 59 59 void vsp1_du_unmap_sg(struct device *dev, struct sg_table *sgt); 60 60