Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Broadcom
 */

/**
 * DOC: VC4 KMS
 *
 * This is the general code for implementing KMS mode setting that
 * doesn't clearly associate with any of the other objects (plane,
 * crtc, HDMI encoder).
 */

#include <drm/drm_crtc.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include "vc4_drv.h"
#include "vc4_regs.h"

struct vc4_ctm_state {
	struct drm_private_state base;
	struct drm_color_ctm *ctm;
	int fifo;
};

static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_ctm_state, base);
}

struct vc4_load_tracker_state {
	struct drm_private_state base;
	u64 hvs_load;
	u64 membus_load;
};

static struct vc4_load_tracker_state *
to_vc4_load_tracker_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_load_tracker_state, base);
}

static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
					       struct drm_private_obj *manager)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = dev->dev_private;
	struct drm_private_state *priv_state;
	int ret;

	ret = drm_modeset_lock(&vc4->ctm_state_lock, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	priv_state = drm_atomic_get_private_obj_state(state, manager);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_ctm_state(priv_state);
}

static struct drm_private_state *
vc4_ctm_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_ctm_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_ctm_destroy_state(struct drm_private_obj *obj,
				  struct drm_private_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(state);

	kfree(ctm_state);
}

static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
	.atomic_duplicate_state = vc4_ctm_duplicate_state,
	.atomic_destroy_state = vc4_ctm_destroy_state,
};

/* Converts a DRM S31.32 value to the HW S0.9 format. */
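/*
 * For example, 0.5 in S31.32 is 0x0000000080000000: bits [62:32] are all
 * clear, so we keep the nine most significant fraction bits, [31:23].
 * (0x80000000 >> 23) & 0x1ff = 0x100 = 256, and 256/512 = 0.5 in S0.9.
 */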
static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
{
	u16 r;

	/* Sign bit. */
	r = in & BIT_ULL(63) ? BIT(9) : 0;

	if ((in & GENMASK_ULL(62, 32)) > 0) {
		/* We have zero integer bits so we can only saturate here. */
		r |= GENMASK(8, 0);
	} else {
		/* Otherwise take the 9 most significant fractional bits. */
		r |= (in >> 23) & GENMASK(8, 0);
	}

	return r;
}

static void
vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
	struct drm_color_ctm *ctm = ctm_state->ctm;

	if (ctm_state->fifo) {
		HVS_WRITE(SCALER_OLEDCOEF2,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
					SCALER_OLEDCOEF2_R_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[3]),
					SCALER_OLEDCOEF2_R_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[6]),
					SCALER_OLEDCOEF2_R_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF1,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[1]),
					SCALER_OLEDCOEF1_G_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[4]),
					SCALER_OLEDCOEF1_G_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[7]),
					SCALER_OLEDCOEF1_G_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF0,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[2]),
					SCALER_OLEDCOEF0_B_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[5]),
					SCALER_OLEDCOEF0_B_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[8]),
					SCALER_OLEDCOEF0_B_TO_B));
	}

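	/* fifo == 0 disables the CTM; otherwise it selects the (1-based)
	 * FIFO the CTM applies to.
	 */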
	HVS_WRITE(SCALER_OLEDOFFS,
		  VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
}

static void
vc4_atomic_complete_commit(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_crtc *vc4_crtc;
	int i;

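	/* Mask underrun interrupts on every channel touched by this
	 * commit before reconfiguring the hardware.
	 */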
	for (i = 0; i < dev->mode_config.num_crtc; i++) {
		if (!state->crtcs[i].ptr || !state->crtcs[i].commit)
			continue;

		vc4_crtc = to_vc4_crtc(state->crtcs[i].ptr);
		vc4_hvs_mask_underrun(dev, vc4_crtc->channel);
	}

	drm_atomic_helper_wait_for_fences(dev, state, false);

	drm_atomic_helper_wait_for_dependencies(state);

	drm_atomic_helper_commit_modeset_disables(dev, state);

	vc4_ctm_commit(vc4, state);

	drm_atomic_helper_commit_planes(dev, state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	drm_atomic_helper_fake_vblank(state);

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	drm_atomic_helper_commit_cleanup_done(state);

	drm_atomic_state_put(state);

	up(&vc4->async_modeset);
}

static void commit_work(struct work_struct *work)
{
	struct drm_atomic_state *state = container_of(work,
						      struct drm_atomic_state,
						      commit_work);
	vc4_atomic_complete_commit(state);
}

/**
 * vc4_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
 * reservation fails. For now this doesn't implement asynchronous commits.
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int vc4_atomic_commit(struct drm_device *dev,
			     struct drm_atomic_state *state,
			     bool nonblock)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret;

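	/* Async plane updates (e.g. cursor moves) skip the full commit
	 * machinery and are applied immediately.
	 */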
	if (state->async_update) {
		ret = down_interruptible(&vc4->async_modeset);
		if (ret)
			return ret;

		ret = drm_atomic_helper_prepare_planes(dev, state);
		if (ret) {
			up(&vc4->async_modeset);
			return ret;
		}

		drm_atomic_helper_async_commit(dev, state);

		drm_atomic_helper_cleanup_planes(dev, state);

		up(&vc4->async_modeset);

		return 0;
	}

	/* We know for sure we don't want an async update here. Set
	 * state->legacy_cursor_update to false to prevent
	 * drm_atomic_helper_setup_commit() from auto-completing
	 * commit->flip_done.
	 */
	state->legacy_cursor_update = false;
	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (ret)
		return ret;

	INIT_WORK(&state->commit_work, commit_work);

	ret = down_interruptible(&vc4->async_modeset);
	if (ret)
		return ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret) {
		up(&vc4->async_modeset);
		return ret;
	}

	if (!nonblock) {
		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
		if (ret) {
			drm_atomic_helper_cleanup_planes(dev, state);
			up(&vc4->async_modeset);
			return ret;
		}
	}

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update, which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	drm_atomic_state_get(state);
	if (nonblock)
		queue_work(system_unbound_wq, &state->commit_work);
	else
		vc4_atomic_complete_commit(state);

	return 0;
}

static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
					     struct drm_file *file_priv,
					     const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_mode_fb_cmd2 mode_cmd_local;

	/* If the user didn't specify a modifier, use the
	 * vc4_set_tiling_ioctl() state for the BO.
	 */
	if (!(mode_cmd->flags & DRM_MODE_FB_MODIFIERS)) {
		struct drm_gem_object *gem_obj;
		struct vc4_bo *bo;

		gem_obj = drm_gem_object_lookup(file_priv,
						mode_cmd->handles[0]);
		if (!gem_obj) {
			DRM_DEBUG("Failed to look up GEM BO %d\n",
				  mode_cmd->handles[0]);
			return ERR_PTR(-ENOENT);
		}
		bo = to_vc4_bo(gem_obj);

		mode_cmd_local = *mode_cmd;

		if (bo->t_format) {
			mode_cmd_local.modifier[0] =
				DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
		} else {
			mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
		}

		drm_gem_object_put_unlocked(gem_obj);

		mode_cmd = &mode_cmd_local;
	}

	return drm_gem_fb_create(dev, file_priv, mode_cmd);
}

/* Our CTM has some peculiar limitations: we can only enable it for one CRTC
 * at a time and the HW only supports S0.9 scalars. To account for the latter,
 * we don't allow userland to set a CTM that we have no hope of approximating.
 */
static int
vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_ctm_state *ctm_state = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_color_ctm *ctm;
	int i, j;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		/* CTM is being disabled. */
		if (!new_crtc_state->ctm && old_crtc_state->ctm) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
			ctm_state->fifo = 0;
		}
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (new_crtc_state->ctm == old_crtc_state->ctm)
			continue;

		if (!ctm_state) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
		}

		/* CTM is being enabled or the matrix changed. */
		if (new_crtc_state->ctm) {
			/* fifo is 1-based since 0 disables CTM. */
			int fifo = to_vc4_crtc(crtc)->channel + 1;

			/* Check userland isn't trying to turn on CTM for more
			 * than one CRTC at a time.
			 */
			if (ctm_state->fifo && ctm_state->fifo != fifo) {
				DRM_DEBUG_DRIVER("Too many CTMs configured\n");
				return -EINVAL;
			}

			/* Check we can approximate the specified CTM.
			 * We disallow scalars |c| > 1.0 since the HW has
			 * no integer bits.
			 */
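			/* In S31.32, 1.0 is exactly BIT_ULL(32); e.g. 1.25
			 * is 0x140000000, which exceeds it and is rejected
			 * below.
			 */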
			ctm = new_crtc_state->ctm->data;
			for (j = 0; j < ARRAY_SIZE(ctm->matrix); j++) {
				u64 val = ctm->matrix[j];

				val &= ~BIT_ULL(63);
				if (val > BIT_ULL(32))
					return -EINVAL;
			}

			ctm_state->fifo = fifo;
			ctm_state->ctm = ctm;
		}
	}

	return 0;
}

static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state)
{
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct vc4_load_tracker_state *load_state;
	struct drm_private_state *priv_state;
	struct drm_plane *plane;
	int i;

	priv_state = drm_atomic_get_private_obj_state(state,
						      &vc4->load_tracker);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	load_state = to_vc4_load_tracker_state(priv_state);
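
	/* Update the running totals: retire the contribution of each
	 * plane's old state, then add the contribution of its new state.
	 */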
	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
				       new_plane_state, i) {
		struct vc4_plane_state *vc4_plane_state;

		if (old_plane_state->fb && old_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(old_plane_state);
			load_state->membus_load -= vc4_plane_state->membus_load;
			load_state->hvs_load -= vc4_plane_state->hvs_load;
		}

		if (new_plane_state->fb && new_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(new_plane_state);
			load_state->membus_load += vc4_plane_state->membus_load;
			load_state->hvs_load += vc4_plane_state->hvs_load;
		}
	}

	/* Don't check the load when the tracker is disabled. */
	if (!vc4->load_tracker_enabled)
		return 0;

	/* The absolute limit is 2Gbyte/sec, but let's take a margin to let
	 * the system work when other blocks are accessing the memory: cap
	 * the load at 1.5Gbyte/sec.
	 */
	if (load_state->membus_load > SZ_1G + SZ_512M)
		return -ENOSPC;

	/* The HVS clock is supposed to run at 250MHz; take a margin and
	 * assume a maximum of 240M cycles.
	 */
	if (load_state->hvs_load > 240000000ULL)
		return -ENOSPC;

	return 0;
}

static struct drm_private_state *
vc4_load_tracker_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_load_tracker_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_load_tracker_destroy_state(struct drm_private_obj *obj,
					   struct drm_private_state *state)
{
	struct vc4_load_tracker_state *load_state;

	load_state = to_vc4_load_tracker_state(state);
	kfree(load_state);
}

static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
	.atomic_duplicate_state = vc4_load_tracker_duplicate_state,
	.atomic_destroy_state = vc4_load_tracker_destroy_state,
};

static int
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	int ret;

	ret = vc4_ctm_atomic_check(dev, state);
	if (ret < 0)
		return ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	return vc4_load_tracker_atomic_check(state);
}

static const struct drm_mode_config_funcs vc4_mode_funcs = {
	.atomic_check = vc4_atomic_check,
	.atomic_commit = vc4_atomic_commit,
	.fb_create = vc4_fb_create,
};

int vc4_kms_load(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_ctm_state *ctm_state;
	struct vc4_load_tracker_state *load_state;
	int ret;

	/* Start with the load tracker enabled. Can be disabled through the
	 * debugfs load_tracker file.
	 */
	vc4->load_tracker_enabled = true;

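	/* A one-slot semaphore: commits are serialized so that only one
	 * async modeset is in flight at a time.
	 */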
	sema_init(&vc4->async_modeset, 1);

	/* Set support for vblank irq fast disable, before drm_vblank_init() */
	dev->vblank_disable_immediate = true;

	dev->irq_enabled = true;
	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		return ret;
	}

	dev->mode_config.max_width = 2048;
	dev->mode_config.max_height = 2048;
	dev->mode_config.funcs = &vc4_mode_funcs;
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.async_page_flip = true;
	dev->mode_config.allow_fb_modifiers = true;

	drm_modeset_lock_init(&vc4->ctm_state_lock);

	ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
	if (!ctm_state)
		return -ENOMEM;

	drm_atomic_private_obj_init(dev, &vc4->ctm_manager, &ctm_state->base,
				    &vc4_ctm_state_funcs);

	load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
	if (!load_state) {
		drm_atomic_private_obj_fini(&vc4->ctm_manager);
		return -ENOMEM;
	}

	drm_atomic_private_obj_init(dev, &vc4->load_tracker, &load_state->base,
				    &vc4_load_tracker_state_funcs);

	drm_mode_config_reset(dev);

	drm_kms_helper_poll_init(dev);

	return 0;
}