Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2017 NVIDIA CORPORATION. All rights reserved.
4 */
5
6#include <linux/clk.h>
7#include <linux/delay.h>
8#include <linux/host1x.h>
9#include <linux/module.h>
10#include <linux/of.h>
11#include <linux/of_device.h>
12#include <linux/of_graph.h>
13#include <linux/platform_device.h>
14#include <linux/pm_runtime.h>
15#include <linux/reset.h>
16
17#include <drm/drm_atomic.h>
18#include <drm/drm_atomic_helper.h>
19#include <drm/drm_fourcc.h>
20#include <drm/drm_probe_helper.h>
21
22#include "drm.h"
23#include "dc.h"
24#include "plane.h"
25
/* pixel formats that the shared (window-group) planes can scan out */
static const u32 tegra_shared_plane_formats[] = {
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_RGBA5551,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	/* new on Tegra114 */
	DRM_FORMAT_ABGR4444,
	DRM_FORMAT_ABGR1555,
	DRM_FORMAT_BGRA5551,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_RGBX5551,
	DRM_FORMAT_XBGR1555,
	DRM_FORMAT_BGRX5551,
	DRM_FORMAT_BGR565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	/* planar formats */
	DRM_FORMAT_UYVY,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YUV420,
	DRM_FORMAT_YUV422,
};
49
/*
 * Framebuffer modifiers advertised for the shared planes: linear plus the
 * NVIDIA 16Bx2 block-linear layouts with block heights 2^0..2^5.
 */
static const u64 tegra_shared_plane_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
	/*
	 * The GPU sector layout is only supported on Tegra194, but these will
	 * be filtered out later on by ->format_mod_supported() on SoCs where
	 * it isn't supported.
	 */
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	/* sentinel */
	DRM_FORMAT_MOD_INVALID
};
72
/*
 * Translate a window register offset into the per-plane register aperture.
 *
 * Three documented register ranges are remapped to their location within
 * the plane's aperture; any other offset falls through with a warning and
 * is applied relative to plane->offset unmodified (which is almost
 * certainly wrong, hence the dev_WARN).
 */
static inline unsigned int tegra_plane_offset(struct tegra_plane *plane,
					      unsigned int offset)
{
	/* 0x500..0x581 maps to aperture offset 0x000 */
	if (offset >= 0x500 && offset <= 0x581) {
		offset = 0x000 + (offset - 0x500);
		return plane->offset + offset;
	}

	/* 0x700..0x73c maps to aperture offset 0x180 */
	if (offset >= 0x700 && offset <= 0x73c) {
		offset = 0x180 + (offset - 0x700);
		return plane->offset + offset;
	}

	/* 0x800..0x83e maps to aperture offset 0x1c0 */
	if (offset >= 0x800 && offset <= 0x83e) {
		offset = 0x1c0 + (offset - 0x800);
		return plane->offset + offset;
	}

	dev_WARN(plane->dc->dev, "invalid offset: %x\n", offset);

	return plane->offset + offset;
}
95
96static inline u32 tegra_plane_readl(struct tegra_plane *plane,
97 unsigned int offset)
98{
99 return tegra_dc_readl(plane->dc, tegra_plane_offset(plane, offset));
100}
101
102static inline void tegra_plane_writel(struct tegra_plane *plane, u32 value,
103 unsigned int offset)
104{
105 tegra_dc_writel(plane->dc, value, tegra_plane_offset(plane, offset));
106}
107
108static int tegra_windowgroup_enable(struct tegra_windowgroup *wgrp)
109{
110 int err = 0;
111
112 mutex_lock(&wgrp->lock);
113
114 if (wgrp->usecount == 0) {
115 err = host1x_client_resume(wgrp->parent);
116 if (err < 0) {
117 dev_err(wgrp->parent->dev, "failed to resume: %d\n", err);
118 goto unlock;
119 }
120
121 reset_control_deassert(wgrp->rst);
122 }
123
124 wgrp->usecount++;
125
126unlock:
127 mutex_unlock(&wgrp->lock);
128 return err;
129}
130
131static void tegra_windowgroup_disable(struct tegra_windowgroup *wgrp)
132{
133 int err;
134
135 mutex_lock(&wgrp->lock);
136
137 if (wgrp->usecount == 1) {
138 err = reset_control_assert(wgrp->rst);
139 if (err < 0) {
140 pr_err("failed to assert reset for window group %u\n",
141 wgrp->index);
142 }
143
144 host1x_client_suspend(wgrp->parent);
145 }
146
147 wgrp->usecount--;
148 mutex_unlock(&wgrp->lock);
149}
150
151int tegra_display_hub_prepare(struct tegra_display_hub *hub)
152{
153 unsigned int i;
154
155 /*
156 * XXX Enabling/disabling windowgroups needs to happen when the owner
157 * display controller is disabled. There's currently no good point at
158 * which this could be executed, so unconditionally enable all window
159 * groups for now.
160 */
161 for (i = 0; i < hub->soc->num_wgrps; i++) {
162 struct tegra_windowgroup *wgrp = &hub->wgrps[i];
163
164 /* Skip orphaned window group whose parent DC is disabled */
165 if (wgrp->parent)
166 tegra_windowgroup_enable(wgrp);
167 }
168
169 return 0;
170}
171
172void tegra_display_hub_cleanup(struct tegra_display_hub *hub)
173{
174 unsigned int i;
175
176 /*
177 * XXX Remove this once window groups can be more fine-grainedly
178 * enabled and disabled.
179 */
180 for (i = 0; i < hub->soc->num_wgrps; i++) {
181 struct tegra_windowgroup *wgrp = &hub->wgrps[i];
182
183 /* Skip orphaned window group whose parent DC is disabled */
184 if (wgrp->parent)
185 tegra_windowgroup_disable(wgrp);
186 }
187}
188
189static void tegra_shared_plane_update(struct tegra_plane *plane)
190{
191 struct tegra_dc *dc = plane->dc;
192 unsigned long timeout;
193 u32 mask, value;
194
195 mask = COMMON_UPDATE | WIN_A_UPDATE << plane->base.index;
196 tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);
197
198 timeout = jiffies + msecs_to_jiffies(1000);
199
200 while (time_before(jiffies, timeout)) {
201 value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
202 if ((value & mask) == 0)
203 break;
204
205 usleep_range(100, 400);
206 }
207}
208
209static void tegra_shared_plane_activate(struct tegra_plane *plane)
210{
211 struct tegra_dc *dc = plane->dc;
212 unsigned long timeout;
213 u32 mask, value;
214
215 mask = COMMON_ACTREQ | WIN_A_ACT_REQ << plane->base.index;
216 tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);
217
218 timeout = jiffies + msecs_to_jiffies(1000);
219
220 while (time_before(jiffies, timeout)) {
221 value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
222 if ((value & mask) == 0)
223 break;
224
225 usleep_range(100, 400);
226 }
227}
228
229static unsigned int
230tegra_shared_plane_get_owner(struct tegra_plane *plane, struct tegra_dc *dc)
231{
232 unsigned int offset =
233 tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
234
235 return tegra_dc_readl(dc, offset) & OWNER_MASK;
236}
237
238static bool tegra_dc_owns_shared_plane(struct tegra_dc *dc,
239 struct tegra_plane *plane)
240{
241 struct device *dev = dc->dev;
242
243 if (tegra_shared_plane_get_owner(plane, dc) == dc->pipe) {
244 if (plane->dc == dc)
245 return true;
246
247 dev_WARN(dev, "head %u owns window %u but is not attached\n",
248 dc->pipe, plane->index);
249 }
250
251 return false;
252}
253
/*
 * Transfer ownership of @plane to head @new (or release it when @new is
 * NULL). Returns 0 on success or -EBUSY if another head currently owns the
 * window. On success, plane->dc is updated to the new owner.
 */
static int tegra_shared_plane_set_owner(struct tegra_plane *plane,
					struct tegra_dc *new)
{
	unsigned int offset =
		tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
	/* register access goes through the new head if there is one */
	struct tegra_dc *old = plane->dc, *dc = new ? new : old;
	struct device *dev = new ? new->dev : old->dev;
	unsigned int owner, index = plane->index;
	u32 value;

	value = tegra_dc_readl(dc, offset);
	owner = value & OWNER_MASK;

	/* refuse to steal a window that some other head owns */
	if (new && (owner != OWNER_MASK && owner != new->pipe)) {
		dev_WARN(dev, "window %u owned by head %u\n", index, owner);
		return -EBUSY;
	}

	/*
	 * This seems to happen whenever the head has been disabled with one
	 * or more windows being active. This is harmless because we'll just
	 * reassign the window to the new head anyway.
	 */
	if (old && owner == OWNER_MASK)
		dev_dbg(dev, "window %u not owned by head %u but %u\n", index,
			old->pipe, owner);

	value &= ~OWNER_MASK;

	/* OWNER_MASK (all ones) marks the window as unowned */
	if (new)
		value |= OWNER(new->pipe);
	else
		value |= OWNER_MASK;

	tegra_dc_writel(dc, value, offset);

	plane->dc = new;

	return 0;
}
294
/*
 * Claim @plane for @dc (if not already owned) and program the window
 * group's line buffer, fetch metering, latency, pipe meter, mempool and
 * thread-group settings, then latch and activate the configuration.
 * Ownership failure is silently ignored here; the plane simply stays
 * unassigned.
 */
static void tegra_dc_assign_shared_plane(struct tegra_dc *dc,
					 struct tegra_plane *plane)
{
	u32 value;
	int err;

	if (!tegra_dc_owns_shared_plane(dc, plane)) {
		err = tegra_shared_plane_set_owner(plane, dc);
		if (err < 0)
			return;
	}

	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);
	value |= MODE_FOUR_LINES;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);

	/*
	 * NOTE(review): here and below the register is read but the result is
	 * then overwritten (plain '=' rather than '|='). Presumably the full
	 * register contents are meant to be replaced — confirm against the
	 * hardware documentation.
	 */
	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);
	value = SLOTS(1);
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);

	/* disable watermark */
	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);
	value &= ~LATENCY_CTL_MODE_ENABLE;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);

	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);
	value |= WATERMARK_MASK;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);

	/* pipe meter */
	value = tegra_plane_readl(plane, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);
	value = PIPE_METER_INT(0) | PIPE_METER_FRAC(0);
	tegra_plane_writel(plane, value, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);

	/* mempool entries */
	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);
	value = MEMPOOL_ENTRIES(0x331);
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);

	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_THREAD_GROUP);
	value &= ~THREAD_NUM_MASK;
	value |= THREAD_NUM(plane->base.index);
	value |= THREAD_GROUP_ENABLE;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_THREAD_GROUP);

	tegra_shared_plane_update(plane);
	tegra_shared_plane_activate(plane);
}
343
/*
 * Detach @plane from its head by clearing the hardware ownership (NULL
 * owner marks the window as unowned). The return value is deliberately
 * ignored; @dc is unused but kept for symmetry with the assign path.
 */
static void tegra_dc_remove_shared_plane(struct tegra_dc *dc,
					 struct tegra_plane *plane)
{
	tegra_shared_plane_set_owner(plane, NULL);
}
349
/*
 * Validate the new plane state: resolve the framebuffer format into the
 * hardware format/byte-swap, extract the tiling parameters and reject
 * configurations the SoC cannot scan out. Returns 0 on success or a
 * negative error code.
 */
static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
					   struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	struct tegra_plane_state *plane_state = to_tegra_plane_state(new_plane_state);
	struct tegra_shared_plane *tegra = to_tegra_shared_plane(plane);
	struct tegra_bo_tiling *tiling = &plane_state->tiling;
	struct tegra_dc *dc = to_tegra_dc(new_plane_state->crtc);
	int err;

	/* no need for further checks if the plane is being disabled */
	if (!new_plane_state->crtc || !new_plane_state->fb)
		return 0;

	/* translate the DRM fourcc into hardware format and swap settings */
	err = tegra_plane_format(new_plane_state->fb->format->format,
				 &plane_state->format,
				 &plane_state->swap);
	if (err < 0)
		return err;

	err = tegra_fb_get_tiling(new_plane_state->fb, tiling);
	if (err < 0)
		return err;

	if (tiling->mode == TEGRA_BO_TILING_MODE_BLOCK &&
	    !dc->soc->supports_block_linear) {
		DRM_ERROR("hardware doesn't support block linear mode\n");
		return -EINVAL;
	}

	if (tiling->sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU &&
	    !dc->soc->supports_sector_layout) {
		DRM_ERROR("hardware doesn't support GPU sector layout\n");
		return -EINVAL;
	}

	/*
	 * Tegra doesn't support different strides for U and V planes so we
	 * error out if the user tries to display a framebuffer with such a
	 * configuration.
	 */
	if (new_plane_state->fb->format->num_planes > 2) {
		if (new_plane_state->fb->pitches[2] != new_plane_state->fb->pitches[1]) {
			DRM_ERROR("unsupported UV-plane configuration\n");
			return -EINVAL;
		}
	}

	/* XXX scaling is not yet supported, add a check here */

	/* record the plane in the per-CRTC state for later programming */
	err = tegra_plane_state_add(&tegra->base, new_plane_state);
	if (err < 0)
		return err;

	return 0;
}
407
/*
 * Disable the plane: clear its window-enable bit and release ownership of
 * the window group. The owning DC is temporarily resumed so the registers
 * can be accessed.
 */
static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
					      struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct tegra_plane *p = to_tegra_plane(plane);
	struct tegra_dc *dc;
	u32 value;
	int err;

	/* rien ne va plus */
	if (!old_state || !old_state->crtc)
		return;

	dc = to_tegra_dc(old_state->crtc);

	err = host1x_client_resume(&dc->client);
	if (err < 0) {
		dev_err(dc->dev, "failed to resume: %d\n", err);
		return;
	}

	/*
	 * XXX Legacy helpers seem to sometimes call ->atomic_disable() even
	 * on planes that are already disabled. Make sure we fallback to the
	 * head for this particular state instead of crashing.
	 */
	if (WARN_ON(p->dc == NULL))
		p->dc = dc;

	value = tegra_plane_readl(p, DC_WIN_WIN_OPTIONS);
	value &= ~WIN_ENABLE;
	tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);

	tegra_dc_remove_shared_plane(dc, p);

	host1x_client_suspend(&dc->client);
}
446
/*
 * Program the window for the new plane state: claim the window group for
 * the target DC, then configure blending, scaling bypass, scan-out
 * address, position/size, pitch and surface kind. The DC is resumed for
 * the duration of the register programming.
 */
static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
					     struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct tegra_plane_state *tegra_plane_state = to_tegra_plane_state(new_state);
	struct tegra_dc *dc = to_tegra_dc(new_state->crtc);
	unsigned int zpos = new_state->normalized_zpos;
	struct drm_framebuffer *fb = new_state->fb;
	struct tegra_plane *p = to_tegra_plane(plane);
	dma_addr_t base;
	u32 value;
	int err;

	/* rien ne va plus */
	if (!new_state->crtc || !new_state->fb)
		return;

	/* a fully clipped plane is simply disabled */
	if (!new_state->visible) {
		tegra_shared_plane_atomic_disable(plane, state);
		return;
	}

	err = host1x_client_resume(&dc->client);
	if (err < 0) {
		dev_err(dc->dev, "failed to resume: %d\n", err);
		return;
	}

	tegra_dc_assign_shared_plane(dc, p);

	tegra_plane_writel(p, VCOUNTER, DC_WIN_CORE_ACT_CONTROL);

	/* blending */
	value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
		BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
		BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
	tegra_plane_writel(p, value, DC_WIN_BLEND_MATCH_SELECT);

	value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
		BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
		BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
	tegra_plane_writel(p, value, DC_WIN_BLEND_NOMATCH_SELECT);

	/* lower zpos values are placed deeper in the stack */
	value = K2(255) | K1(255) | WINDOW_LAYER_DEPTH(255 - zpos);
	tegra_plane_writel(p, value, DC_WIN_BLEND_LAYER_CONTROL);

	/* bypass scaling */
	value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
	tegra_plane_writel(p, value, DC_WIN_WINDOWGROUP_SET_CONTROL_INPUT_SCALER);

	value = INPUT_SCALER_VBYPASS | INPUT_SCALER_HBYPASS;
	tegra_plane_writel(p, value, DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_USAGE);

	/* disable compression */
	tegra_plane_writel(p, 0, DC_WINBUF_CDE_CONTROL);

	base = tegra_plane_state->iova[0] + fb->offsets[0];

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	/*
	 * Physical address bit 39 in Tegra194 is used as a switch for special
	 * logic that swizzles the memory using either the legacy Tegra or the
	 * dGPU sector layout.
	 */
	if (tegra_plane_state->tiling.sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU)
		base |= BIT(39);
#endif

	tegra_plane_writel(p, tegra_plane_state->format, DC_WIN_COLOR_DEPTH);
	tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS);

	value = V_POSITION(new_state->crtc_y) |
		H_POSITION(new_state->crtc_x);
	tegra_plane_writel(p, value, DC_WIN_POSITION);

	value = V_SIZE(new_state->crtc_h) | H_SIZE(new_state->crtc_w);
	tegra_plane_writel(p, value, DC_WIN_SIZE);

	value = WIN_ENABLE | COLOR_EXPAND;
	tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);

	value = V_SIZE(new_state->crtc_h) | H_SIZE(new_state->crtc_w);
	tegra_plane_writel(p, value, DC_WIN_CROPPED_SIZE);

	tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI);
	tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR);

	value = PITCH(fb->pitches[0]);
	tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE);

	value = CLAMP_BEFORE_BLEND | DEGAMMA_SRGB | INPUT_RANGE_FULL;
	tegra_plane_writel(p, value, DC_WIN_SET_PARAMS);

	/*
	 * NOTE(review): OFFSET_X is fed src_y and OFFSET_Y is fed src_x —
	 * possibly intentional given the register's field layout, but verify
	 * against the hardware documentation.
	 */
	value = OFFSET_X(new_state->src_y >> 16) |
		OFFSET_Y(new_state->src_x >> 16);
	tegra_plane_writel(p, value, DC_WINBUF_CROPPED_POINT);

	if (dc->soc->supports_block_linear) {
		unsigned long height = tegra_plane_state->tiling.value;

		/* XXX */
		switch (tegra_plane_state->tiling.mode) {
		case TEGRA_BO_TILING_MODE_PITCH:
			value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(0) |
				DC_WINBUF_SURFACE_KIND_PITCH;
			break;

		/* XXX not supported on Tegra186 and later */
		case TEGRA_BO_TILING_MODE_TILED:
			value = DC_WINBUF_SURFACE_KIND_TILED;
			break;

		case TEGRA_BO_TILING_MODE_BLOCK:
			value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) |
				DC_WINBUF_SURFACE_KIND_BLOCK;
			break;
		}

		tegra_plane_writel(p, value, DC_WINBUF_SURFACE_KIND);
	}

	/* disable gamut CSC */
	value = tegra_plane_readl(p, DC_WIN_WINDOW_SET_CONTROL);
	value &= ~CONTROL_CSC_ENABLE;
	tegra_plane_writel(p, value, DC_WIN_WINDOW_SET_CONTROL);

	host1x_client_suspend(&dc->client);
}
576
/* atomic helper callbacks for the shared (window-group) planes */
static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
	.prepare_fb = tegra_plane_prepare_fb,
	.cleanup_fb = tegra_plane_cleanup_fb,
	.atomic_check = tegra_shared_plane_atomic_check,
	.atomic_update = tegra_shared_plane_atomic_update,
	.atomic_disable = tegra_shared_plane_atomic_disable,
};
584
/*
 * Create an overlay plane backed by window @index of window group @wgrp,
 * initially parented to @dc. Returns the DRM plane or an ERR_PTR on
 * failure.
 */
struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
					    struct tegra_dc *dc,
					    unsigned int wgrp,
					    unsigned int index)
{
	enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_display_hub *hub = tegra->hub;
	struct tegra_shared_plane *plane;
	unsigned int possible_crtcs;
	unsigned int num_formats;
	const u64 *modifiers;
	struct drm_plane *p;
	const u32 *formats;
	int err;

	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
	if (!plane)
		return ERR_PTR(-ENOMEM);

	/* window register apertures are 0x300 bytes apart starting at 0xa00 */
	plane->base.offset = 0x0a00 + 0x0300 * index;
	plane->base.index = index;

	plane->wgrp = &hub->wgrps[wgrp];
	plane->wgrp->parent = &dc->client;

	p = &plane->base.base;

	/* planes can be assigned to arbitrary CRTCs */
	possible_crtcs = BIT(tegra->num_crtcs) - 1;

	num_formats = ARRAY_SIZE(tegra_shared_plane_formats);
	formats = tegra_shared_plane_formats;
	modifiers = tegra_shared_plane_modifiers;

	err = drm_universal_plane_init(drm, p, possible_crtcs,
				       &tegra_plane_funcs, formats,
				       num_formats, modifiers, type, NULL);
	if (err < 0) {
		kfree(plane);
		return ERR_PTR(err);
	}

	drm_plane_helper_add(p, &tegra_shared_plane_helper_funcs);
	drm_plane_create_zpos_property(p, 0, 0, 255);

	return p;
}
633
634static struct drm_private_state *
635tegra_display_hub_duplicate_state(struct drm_private_obj *obj)
636{
637 struct tegra_display_hub_state *state;
638
639 state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
640 if (!state)
641 return NULL;
642
643 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
644
645 return &state->base;
646}
647
/* Free a hub private state previously created by ->atomic_duplicate_state(). */
static void tegra_display_hub_destroy_state(struct drm_private_obj *obj,
					    struct drm_private_state *state)
{
	kfree(to_tegra_display_hub_state(state));
}
656
/* private-object state management for the display hub */
static const struct drm_private_state_funcs tegra_display_hub_state_funcs = {
	.atomic_duplicate_state = tegra_display_hub_duplicate_state,
	.atomic_destroy_state = tegra_display_hub_destroy_state,
};
661
662static struct tegra_display_hub_state *
663tegra_display_hub_get_state(struct tegra_display_hub *hub,
664 struct drm_atomic_state *state)
665{
666 struct drm_private_state *priv;
667
668 priv = drm_atomic_get_private_obj_state(state, &hub->base);
669 if (IS_ERR(priv))
670 return ERR_CAST(priv);
671
672 return to_tegra_display_hub_state(priv);
673}
674
/*
 * Determine which active head's display clock should feed the hub clock
 * for this atomic transaction and record it (clock, rate and owning DC) in
 * the hub's private state.
 */
int tegra_display_hub_atomic_check(struct drm_device *drm,
				   struct drm_atomic_state *state)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_display_hub_state *hub_state;
	struct drm_crtc_state *old, *new;
	struct drm_crtc *crtc;
	unsigned int i;

	if (!tegra->hub)
		return 0;

	hub_state = tegra_display_hub_get_state(tegra->hub, state);
	if (IS_ERR(hub_state))
		return PTR_ERR(hub_state);

	/*
	 * The display hub display clock needs to be fed by the display clock
	 * with the highest frequency to ensure proper functioning of all the
	 * displays.
	 *
	 * Note that this isn't used before Tegra186, but it doesn't hurt and
	 * conditionalizing it would make the code less clean.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old, new, i) {
		struct tegra_dc_state *dc = to_dc_state(new);

		if (new->active) {
			/* track the active head with the highest pixel clock */
			if (!hub_state->clk || dc->pclk > hub_state->rate) {
				hub_state->dc = to_tegra_dc(dc->base.crtc);
				hub_state->clk = hub_state->dc->clk;
				hub_state->rate = dc->pclk;
			}
		}
	}

	return 0;
}
713
/*
 * Program the hub-wide (common channel) settings through @dc and latch
 * them: clear the latency event, set the display fetch metering slots and
 * issue a common update/activate request. The DC is resumed around the
 * register accesses; the reads after the state-control writes flush the
 * writes.
 */
static void tegra_display_hub_update(struct tegra_dc *dc)
{
	u32 value;
	int err;

	err = host1x_client_resume(&dc->client);
	if (err < 0) {
		dev_err(dc->dev, "failed to resume: %d\n", err);
		return;
	}

	value = tegra_dc_readl(dc, DC_CMD_IHUB_COMMON_MISC_CTL);
	value &= ~LATENCY_EVENT;
	tegra_dc_writel(dc, value, DC_CMD_IHUB_COMMON_MISC_CTL);

	value = tegra_dc_readl(dc, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);
	value = CURS_SLOTS(1) | WGRP_SLOTS(1);
	tegra_dc_writel(dc, value, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);

	tegra_dc_writel(dc, COMMON_UPDATE, DC_CMD_STATE_CONTROL);
	tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
	tegra_dc_writel(dc, COMMON_ACTREQ, DC_CMD_STATE_CONTROL);
	tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);

	host1x_client_suspend(&dc->client);
}
740
/*
 * Commit the hub state chosen by tegra_display_hub_atomic_check(): retune
 * and reparent the hub display clock to the selected head's clock, then
 * push the common-channel update through that head. Clock failures are
 * logged but not fatal.
 */
void tegra_display_hub_atomic_commit(struct drm_device *drm,
				     struct drm_atomic_state *state)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_display_hub *hub = tegra->hub;
	struct tegra_display_hub_state *hub_state;
	struct device *dev = hub->client.dev;
	int err;

	hub_state = to_tegra_display_hub_state(hub->base.state);

	if (hub_state->clk) {
		err = clk_set_rate(hub_state->clk, hub_state->rate);
		if (err < 0)
			dev_err(dev, "failed to set rate of %pC to %lu Hz\n",
				hub_state->clk, hub_state->rate);

		err = clk_set_parent(hub->clk_disp, hub_state->clk);
		if (err < 0)
			dev_err(dev, "failed to set parent of %pC to %pC: %d\n",
				hub->clk_disp, hub_state->clk, err);
	}

	if (hub_state->dc)
		tegra_display_hub_update(hub_state->dc);
}
767
768static int tegra_display_hub_init(struct host1x_client *client)
769{
770 struct tegra_display_hub *hub = to_tegra_display_hub(client);
771 struct drm_device *drm = dev_get_drvdata(client->host);
772 struct tegra_drm *tegra = drm->dev_private;
773 struct tegra_display_hub_state *state;
774
775 state = kzalloc(sizeof(*state), GFP_KERNEL);
776 if (!state)
777 return -ENOMEM;
778
779 drm_atomic_private_obj_init(drm, &hub->base, &state->base,
780 &tegra_display_hub_state_funcs);
781
782 tegra->hub = hub;
783
784 return 0;
785}
786
787static int tegra_display_hub_exit(struct host1x_client *client)
788{
789 struct drm_device *drm = dev_get_drvdata(client->host);
790 struct tegra_drm *tegra = drm->dev_private;
791
792 drm_atomic_private_obj_fini(&tegra->hub->base);
793 tegra->hub = NULL;
794
795 return 0;
796}
797
/*
 * Runtime-suspend the hub: assert its reset, disable the per-head clocks
 * (in reverse order) and the hub/dsc/disp clocks, then drop the runtime PM
 * reference.
 */
static int tegra_display_hub_runtime_suspend(struct host1x_client *client)
{
	struct tegra_display_hub *hub = to_tegra_display_hub(client);
	struct device *dev = client->dev;
	unsigned int i = hub->num_heads;
	int err;

	err = reset_control_assert(hub->rst);
	if (err < 0)
		return err;

	while (i--)
		clk_disable_unprepare(hub->clk_heads[i]);

	clk_disable_unprepare(hub->clk_hub);
	/* clk_dsc may be NULL on SoCs without DSC; the clk API accepts that */
	clk_disable_unprepare(hub->clk_dsc);
	clk_disable_unprepare(hub->clk_disp);

	pm_runtime_put_sync(dev);

	return 0;
}
820
/*
 * Runtime-resume the hub: take a runtime PM reference, enable the
 * disp/dsc/hub clocks and every per-head clock, then release the reset.
 * On failure the goto ladder unwinds exactly what was enabled (note that
 * "disable_heads" also disables clk_hub, since any head failure happens
 * after clk_hub was enabled).
 */
static int tegra_display_hub_runtime_resume(struct host1x_client *client)
{
	struct tegra_display_hub *hub = to_tegra_display_hub(client);
	struct device *dev = client->dev;
	unsigned int i;
	int err;

	err = pm_runtime_resume_and_get(dev);
	if (err < 0) {
		dev_err(dev, "failed to get runtime PM: %d\n", err);
		return err;
	}

	err = clk_prepare_enable(hub->clk_disp);
	if (err < 0)
		goto put_rpm;

	err = clk_prepare_enable(hub->clk_dsc);
	if (err < 0)
		goto disable_disp;

	err = clk_prepare_enable(hub->clk_hub);
	if (err < 0)
		goto disable_dsc;

	for (i = 0; i < hub->num_heads; i++) {
		err = clk_prepare_enable(hub->clk_heads[i]);
		if (err < 0)
			goto disable_heads;
	}

	err = reset_control_deassert(hub->rst);
	if (err < 0)
		goto disable_heads;

	return 0;

disable_heads:
	/* i is the number of head clocks successfully enabled */
	while (i--)
		clk_disable_unprepare(hub->clk_heads[i]);

	clk_disable_unprepare(hub->clk_hub);
disable_dsc:
	clk_disable_unprepare(hub->clk_dsc);
disable_disp:
	clk_disable_unprepare(hub->clk_disp);
put_rpm:
	pm_runtime_put_sync(dev);
	return err;
}
871
/* host1x client callbacks for the display hub */
static const struct host1x_client_ops tegra_display_hub_ops = {
	.init = tegra_display_hub_init,
	.exit = tegra_display_hub_exit,
	.suspend = tegra_display_hub_runtime_suspend,
	.resume = tegra_display_hub_runtime_resume,
};
878
879static int tegra_display_hub_probe(struct platform_device *pdev)
880{
881 u64 dma_mask = dma_get_mask(pdev->dev.parent);
882 struct device_node *child = NULL;
883 struct tegra_display_hub *hub;
884 struct clk *clk;
885 unsigned int i;
886 int err;
887
888 err = dma_coerce_mask_and_coherent(&pdev->dev, dma_mask);
889 if (err < 0) {
890 dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
891 return err;
892 }
893
894 hub = devm_kzalloc(&pdev->dev, sizeof(*hub), GFP_KERNEL);
895 if (!hub)
896 return -ENOMEM;
897
898 hub->soc = of_device_get_match_data(&pdev->dev);
899
900 hub->clk_disp = devm_clk_get(&pdev->dev, "disp");
901 if (IS_ERR(hub->clk_disp)) {
902 err = PTR_ERR(hub->clk_disp);
903 return err;
904 }
905
906 if (hub->soc->supports_dsc) {
907 hub->clk_dsc = devm_clk_get(&pdev->dev, "dsc");
908 if (IS_ERR(hub->clk_dsc)) {
909 err = PTR_ERR(hub->clk_dsc);
910 return err;
911 }
912 }
913
914 hub->clk_hub = devm_clk_get(&pdev->dev, "hub");
915 if (IS_ERR(hub->clk_hub)) {
916 err = PTR_ERR(hub->clk_hub);
917 return err;
918 }
919
920 hub->rst = devm_reset_control_get(&pdev->dev, "misc");
921 if (IS_ERR(hub->rst)) {
922 err = PTR_ERR(hub->rst);
923 return err;
924 }
925
926 hub->wgrps = devm_kcalloc(&pdev->dev, hub->soc->num_wgrps,
927 sizeof(*hub->wgrps), GFP_KERNEL);
928 if (!hub->wgrps)
929 return -ENOMEM;
930
931 for (i = 0; i < hub->soc->num_wgrps; i++) {
932 struct tegra_windowgroup *wgrp = &hub->wgrps[i];
933 char id[8];
934
935 snprintf(id, sizeof(id), "wgrp%u", i);
936 mutex_init(&wgrp->lock);
937 wgrp->usecount = 0;
938 wgrp->index = i;
939
940 wgrp->rst = devm_reset_control_get(&pdev->dev, id);
941 if (IS_ERR(wgrp->rst))
942 return PTR_ERR(wgrp->rst);
943
944 err = reset_control_assert(wgrp->rst);
945 if (err < 0)
946 return err;
947 }
948
949 hub->num_heads = of_get_child_count(pdev->dev.of_node);
950
951 hub->clk_heads = devm_kcalloc(&pdev->dev, hub->num_heads, sizeof(clk),
952 GFP_KERNEL);
953 if (!hub->clk_heads)
954 return -ENOMEM;
955
956 for (i = 0; i < hub->num_heads; i++) {
957 child = of_get_next_child(pdev->dev.of_node, child);
958 if (!child) {
959 dev_err(&pdev->dev, "failed to find node for head %u\n",
960 i);
961 return -ENODEV;
962 }
963
964 clk = devm_get_clk_from_child(&pdev->dev, child, "dc");
965 if (IS_ERR(clk)) {
966 dev_err(&pdev->dev, "failed to get clock for head %u\n",
967 i);
968 of_node_put(child);
969 return PTR_ERR(clk);
970 }
971
972 hub->clk_heads[i] = clk;
973 }
974
975 of_node_put(child);
976
977 /* XXX: enable clock across reset? */
978 err = reset_control_assert(hub->rst);
979 if (err < 0)
980 return err;
981
982 platform_set_drvdata(pdev, hub);
983 pm_runtime_enable(&pdev->dev);
984
985 INIT_LIST_HEAD(&hub->client.list);
986 hub->client.ops = &tegra_display_hub_ops;
987 hub->client.dev = &pdev->dev;
988
989 err = host1x_client_register(&hub->client);
990 if (err < 0)
991 dev_err(&pdev->dev, "failed to register host1x client: %d\n",
992 err);
993
994 err = devm_of_platform_populate(&pdev->dev);
995 if (err < 0)
996 goto unregister;
997
998 return err;
999
1000unregister:
1001 host1x_client_unregister(&hub->client);
1002 pm_runtime_disable(&pdev->dev);
1003 return err;
1004}
1005
1006static int tegra_display_hub_remove(struct platform_device *pdev)
1007{
1008 struct tegra_display_hub *hub = platform_get_drvdata(pdev);
1009 unsigned int i;
1010 int err;
1011
1012 err = host1x_client_unregister(&hub->client);
1013 if (err < 0) {
1014 dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
1015 err);
1016 }
1017
1018 for (i = 0; i < hub->soc->num_wgrps; i++) {
1019 struct tegra_windowgroup *wgrp = &hub->wgrps[i];
1020
1021 mutex_destroy(&wgrp->lock);
1022 }
1023
1024 pm_runtime_disable(&pdev->dev);
1025
1026 return err;
1027}
1028
/* per-SoC display hub description: window group count and DSC support */
static const struct tegra_display_hub_soc tegra186_display_hub = {
	.num_wgrps = 6,
	.supports_dsc = true,
};

static const struct tegra_display_hub_soc tegra194_display_hub = {
	.num_wgrps = 6,
	.supports_dsc = false,
};
1038
/* device-tree match table mapping compatibles to SoC data */
static const struct of_device_id tegra_display_hub_of_match[] = {
	{
		.compatible = "nvidia,tegra194-display",
		.data = &tegra194_display_hub
	}, {
		.compatible = "nvidia,tegra186-display",
		.data = &tegra186_display_hub
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, tegra_display_hub_of_match);
1051
/* platform driver for the Tegra display hub; registered by the DRM core driver */
struct platform_driver tegra_display_hub_driver = {
	.driver = {
		.name = "tegra-display-hub",
		.of_match_table = tegra_display_hub_of_match,
	},
	.probe = tegra_display_hub_probe,
	.remove = tegra_display_hub_remove,
};