Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/*
2 * Copyright (C) 2014 Red Hat
3 * Copyright (C) 2014 Intel Corp.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Rob Clark <robdclark@gmail.com>
25 * Daniel Vetter <daniel.vetter@ffwll.ch>
26 */
27
28#include <drm/drmP.h>
29#include <drm/drm_atomic.h>
30#include <drm/drm_atomic_uapi.h>
31#include <drm/drm_plane_helper.h>
32#include <drm/drm_crtc_helper.h>
33#include <drm/drm_atomic_helper.h>
34#include <drm/drm_writeback.h>
35#include <drm/drm_damage_helper.h>
36#include <linux/dma-fence.h>
37
38#include "drm_crtc_helper_internal.h"
39#include "drm_crtc_internal.h"
40
41/**
42 * DOC: overview
43 *
44 * This helper library provides implementations of check and commit functions on
45 * top of the CRTC modeset helper callbacks and the plane helper callbacks. It
46 * also provides convenience implementations for the atomic state handling
47 * callbacks for drivers which don't need to subclass the drm core structures to
48 * add their own additional internal state.
49 *
50 * This library also provides default implementations for the check callback in
51 * drm_atomic_helper_check() and for the commit callback with
52 * drm_atomic_helper_commit(). But the individual stages and callbacks are
53 * exposed to allow drivers to mix and match and e.g. use the plane helpers only
54 * together with a driver private modeset implementation.
55 *
56 * This library also provides implementations for all the legacy driver
57 * interfaces on top of the atomic interface. See drm_atomic_helper_set_config(),
58 * drm_atomic_helper_disable_plane(), drm_atomic_helper_update_plane() and the
59 * various functions to implement set_property callbacks. New drivers must not
60 * implement these functions themselves but must use the provided helpers.
61 *
62 * The atomic helper uses the same function table structures as all other
63 * modesetting helpers. See the documentation for &struct drm_crtc_helper_funcs,
64 * struct &drm_encoder_helper_funcs and &struct drm_connector_helper_funcs. It
65 * also shares the &struct drm_plane_helper_funcs function table with the plane
66 * helpers.
67 */
68static void
69drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
70 struct drm_plane_state *old_plane_state,
71 struct drm_plane_state *plane_state,
72 struct drm_plane *plane)
73{
74 struct drm_crtc_state *crtc_state;
75
76 if (old_plane_state->crtc) {
77 crtc_state = drm_atomic_get_new_crtc_state(state,
78 old_plane_state->crtc);
79
80 if (WARN_ON(!crtc_state))
81 return;
82
83 crtc_state->planes_changed = true;
84 }
85
86 if (plane_state->crtc) {
87 crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc);
88
89 if (WARN_ON(!crtc_state))
90 return;
91
92 crtc_state->planes_changed = true;
93 }
94}
95
96/*
97 * For connectors that support multiple encoders, either the
98 * .atomic_best_encoder() or .best_encoder() operation must be implemented.
99 */
100static struct drm_encoder *
101pick_single_encoder_for_connector(struct drm_connector *connector)
102{
103 WARN_ON(connector->encoder_ids[1]);
104 return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
105}
106
/*
 * Detect encoder conflicts for the connectors in @state.
 *
 * A conflict is either the same encoder assigned to two connectors inside
 * @state (always an error), or an encoder claimed by @state while still in
 * use by a connector outside of it. For the latter case, when
 * @disable_conflicting_encoders is true the conflicting connectors are
 * pulled into @state and detached from their crtc (disabling the crtc when
 * it ends up with no connectors); otherwise -EINVAL is returned.
 *
 * Returns 0 on success, -EINVAL on an unresolvable conflict, or a negative
 * errno from acquiring additional state objects.
 */
static int handle_conflicting_encoders(struct drm_atomic_state *state,
				       bool disable_conflicting_encoders)
{
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct drm_encoder *encoder;
	unsigned encoder_mask = 0;
	int i, ret = 0;

	/*
	 * First loop, find all newly assigned encoders from the connectors
	 * part of the state. If the same encoder is assigned to multiple
	 * connectors bail out.
	 */
	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
		const struct drm_connector_helper_funcs *funcs = connector->helper_private;
		struct drm_encoder *new_encoder;

		if (!new_conn_state->crtc)
			continue;

		/* Same hook precedence as update_connector_routing(). */
		if (funcs->atomic_best_encoder)
			new_encoder = funcs->atomic_best_encoder(connector, new_conn_state);
		else if (funcs->best_encoder)
			new_encoder = funcs->best_encoder(connector);
		else
			new_encoder = pick_single_encoder_for_connector(connector);

		if (new_encoder) {
			if (encoder_mask & drm_encoder_mask(new_encoder)) {
				DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n",
					new_encoder->base.id, new_encoder->name,
					connector->base.id, connector->name);

				return -EINVAL;
			}

			encoder_mask |= drm_encoder_mask(new_encoder);
		}
	}

	/* No encoders claimed by @state, nothing can conflict. */
	if (!encoder_mask)
		return 0;

	/*
	 * Second loop, iterate over all connectors not part of the state.
	 *
	 * If a conflicting encoder is found and disable_conflicting_encoders
	 * is not set, an error is returned. Userspace can provide a solution
	 * through the atomic ioctl.
	 *
	 * If the flag is set conflicting connectors are removed from the crtc
	 * and the crtc is disabled if no encoder is left. This preserves
	 * compatibility with the legacy set_config behavior.
	 */
	drm_connector_list_iter_begin(state->dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_crtc_state *crtc_state;

		/* Connectors already in @state were handled above. */
		if (drm_atomic_get_new_connector_state(state, connector))
			continue;

		encoder = connector->state->best_encoder;
		if (!encoder || !(encoder_mask & drm_encoder_mask(encoder)))
			continue;

		if (!disable_conflicting_encoders) {
			DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s] by [CONNECTOR:%d:%s]\n",
					 encoder->base.id, encoder->name,
					 connector->state->crtc->base.id,
					 connector->state->crtc->name,
					 connector->base.id, connector->name);
			ret = -EINVAL;
			goto out;
		}

		/* Pull the conflicting connector into @state so it can be
		 * detached from its crtc. */
		new_conn_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(new_conn_state)) {
			ret = PTR_ERR(new_conn_state);
			goto out;
		}

		DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], disabling [CONNECTOR:%d:%s]\n",
				 encoder->base.id, encoder->name,
				 new_conn_state->crtc->base.id, new_conn_state->crtc->name,
				 connector->base.id, connector->name);

		crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);

		ret = drm_atomic_set_crtc_for_connector(new_conn_state, NULL);
		if (ret)
			goto out;

		/* Last connector gone: shut down the crtc entirely. */
		if (!crtc_state->connector_mask) {
			ret = drm_atomic_set_mode_prop_for_crtc(crtc_state,
								NULL);
			if (ret < 0)
				goto out;

			crtc_state->active = false;
		}
	}
out:
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}
215
/*
 * Point @conn_state's best_encoder at @encoder (which may be NULL to
 * detach), keeping the encoder_mask of both the old and the new crtc
 * states in sync with the assignment.
 */
static void
set_best_encoder(struct drm_atomic_state *state,
		 struct drm_connector_state *conn_state,
		 struct drm_encoder *encoder)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;

	if (conn_state->best_encoder) {
		/* Unset the encoder_mask in the old crtc state. */
		crtc = conn_state->connector->state->crtc;

		/* A NULL crtc is an error here because we should have
		 * duplicated a NULL best_encoder when crtc was NULL.
		 * As an exception restoring duplicated atomic state
		 * during resume is allowed, so don't warn when
		 * best_encoder is equal to encoder we intend to set.
		 */
		WARN_ON(!crtc && encoder != conn_state->best_encoder);
		if (crtc) {
			crtc_state = drm_atomic_get_new_crtc_state(state, crtc);

			crtc_state->encoder_mask &=
				~drm_encoder_mask(conn_state->best_encoder);
		}
	}

	if (encoder) {
		/* Set the encoder_mask in the new crtc state. */
		crtc = conn_state->crtc;
		WARN_ON(!crtc);
		if (crtc) {
			crtc_state = drm_atomic_get_new_crtc_state(state, crtc);

			crtc_state->encoder_mask |=
				drm_encoder_mask(encoder);
		}
	}

	conn_state->best_encoder = encoder;
}
256
/*
 * Detach @encoder from the connector in @state whose new state still uses
 * it, marking that connector's old crtc as having changed connectors.
 * Only the first match is handled — duplicate assignments inside @state
 * are rejected earlier by handle_conflicting_encoders().
 */
static void
steal_encoder(struct drm_atomic_state *state,
	      struct drm_encoder *encoder)
{
	struct drm_crtc_state *crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *old_connector_state, *new_connector_state;
	int i;

	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
		struct drm_crtc *encoder_crtc;

		if (new_connector_state->best_encoder != encoder)
			continue;

		/* The crtc the encoder is being stolen from. */
		encoder_crtc = old_connector_state->crtc;

		DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
				 encoder->base.id, encoder->name,
				 encoder_crtc->base.id, encoder_crtc->name);

		set_best_encoder(state, new_connector_state, NULL);

		crtc_state = drm_atomic_get_new_crtc_state(state, encoder_crtc);
		crtc_state->connectors_changed = true;

		return;
	}
}
286
/*
 * Pick the encoder for @connector's new state (stealing it from another
 * connector in @state if necessary) and set connectors_changed on the
 * affected crtc states.
 *
 * Returns 0 on success, or -EINVAL when no suitable encoder exists, the
 * encoder is incompatible with the target crtc, or the connector has been
 * unregistered while the crtc is active.
 */
static int
update_connector_routing(struct drm_atomic_state *state,
			 struct drm_connector *connector,
			 struct drm_connector_state *old_connector_state,
			 struct drm_connector_state *new_connector_state)
{
	const struct drm_connector_helper_funcs *funcs;
	struct drm_encoder *new_encoder;
	struct drm_crtc_state *crtc_state;

	DRM_DEBUG_ATOMIC("Updating routing for [CONNECTOR:%d:%s]\n",
			 connector->base.id,
			 connector->name);

	/* Crtc changed: flag both the crtc being left and the one joined. */
	if (old_connector_state->crtc != new_connector_state->crtc) {
		if (old_connector_state->crtc) {
			crtc_state = drm_atomic_get_new_crtc_state(state, old_connector_state->crtc);
			crtc_state->connectors_changed = true;
		}

		if (new_connector_state->crtc) {
			crtc_state = drm_atomic_get_new_crtc_state(state, new_connector_state->crtc);
			crtc_state->connectors_changed = true;
		}
	}

	if (!new_connector_state->crtc) {
		DRM_DEBUG_ATOMIC("Disabling [CONNECTOR:%d:%s]\n",
				 connector->base.id,
				 connector->name);

		/* Detach the encoder; nothing else to route. */
		set_best_encoder(state, new_connector_state, NULL);

		return 0;
	}

	crtc_state = drm_atomic_get_new_crtc_state(state,
						   new_connector_state->crtc);
	/*
	 * For compatibility with legacy users, we want to make sure that
	 * we allow DPMS On->Off modesets on unregistered connectors. Modesets
	 * which would result in anything else must be considered invalid, to
	 * avoid turning on new displays on dead connectors.
	 *
	 * Since the connector can be unregistered at any point during an
	 * atomic check or commit, this is racy. But that's OK: all we care
	 * about is ensuring that userspace can't do anything but shut off the
	 * display on a connector that was destroyed after its been notified,
	 * not before.
	 */
	if (drm_connector_is_unregistered(connector) && crtc_state->active) {
		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] is not registered\n",
				 connector->base.id, connector->name);
		return -EINVAL;
	}

	funcs = connector->helper_private;

	/* Hook precedence: atomic_best_encoder, then best_encoder, then
	 * the single-encoder fallback. */
	if (funcs->atomic_best_encoder)
		new_encoder = funcs->atomic_best_encoder(connector,
							 new_connector_state);
	else if (funcs->best_encoder)
		new_encoder = funcs->best_encoder(connector);
	else
		new_encoder = pick_single_encoder_for_connector(connector);

	if (!new_encoder) {
		DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
				 connector->base.id,
				 connector->name);
		return -EINVAL;
	}

	if (!drm_encoder_crtc_ok(new_encoder, new_connector_state->crtc)) {
		DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] incompatible with [CRTC:%d:%s]\n",
				 new_encoder->base.id,
				 new_encoder->name,
				 new_connector_state->crtc->base.id,
				 new_connector_state->crtc->name);
		return -EINVAL;
	}

	/* Same encoder as before: still call set_best_encoder() so the
	 * crtc encoder_mask bookkeeping stays consistent. */
	if (new_encoder == new_connector_state->best_encoder) {
		set_best_encoder(state, new_connector_state, new_encoder);

		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n",
				 connector->base.id,
				 connector->name,
				 new_encoder->base.id,
				 new_encoder->name,
				 new_connector_state->crtc->base.id,
				 new_connector_state->crtc->name);

		return 0;
	}

	steal_encoder(state, new_encoder);

	set_best_encoder(state, new_connector_state, new_encoder);

	crtc_state->connectors_changed = true;

	DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n",
			 connector->base.id,
			 connector->name,
			 new_encoder->base.id,
			 new_encoder->name,
			 new_connector_state->crtc->base.id,
			 new_connector_state->crtc->name);

	return 0;
}
399
/*
 * Recompute adjusted_mode for every crtc undergoing a modeset, by running
 * the bridge, encoder and crtc fixup/check hooks in that order. Returns 0
 * on success or a negative errno when any hook rejects the mode.
 */
static int
mode_fixup(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;
	int ret;

	/* Start fixup from the user-requested mode. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!new_crtc_state->mode_changed &&
		    !new_crtc_state->connectors_changed)
			continue;

		drm_mode_copy(&new_crtc_state->adjusted_mode, &new_crtc_state->mode);
	}

	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
		const struct drm_encoder_helper_funcs *funcs;
		struct drm_encoder *encoder;

		/* best_encoder and crtc must be set/unset together. */
		WARN_ON(!!new_conn_state->best_encoder != !!new_conn_state->crtc);

		if (!new_conn_state->crtc || !new_conn_state->best_encoder)
			continue;

		new_crtc_state =
			drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);

		/*
		 * Each encoder has at most one connector (since we always steal
		 * it away), so we won't call ->mode_fixup twice.
		 */
		encoder = new_conn_state->best_encoder;
		funcs = encoder->helper_private;

		/* Bridge fixup hooks return a bool, not an errno. */
		ret = drm_bridge_mode_fixup(encoder->bridge, &new_crtc_state->mode,
				&new_crtc_state->adjusted_mode);
		if (!ret) {
			DRM_DEBUG_ATOMIC("Bridge fixup failed\n");
			return -EINVAL;
		}

		/* ->atomic_check takes precedence over ->mode_fixup. */
		if (funcs && funcs->atomic_check) {
			ret = funcs->atomic_check(encoder, new_crtc_state,
						  new_conn_state);
			if (ret) {
				DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] check failed\n",
						 encoder->base.id, encoder->name);
				return ret;
			}
		} else if (funcs && funcs->mode_fixup) {
			ret = funcs->mode_fixup(encoder, &new_crtc_state->mode,
						&new_crtc_state->adjusted_mode);
			if (!ret) {
				DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] fixup failed\n",
						 encoder->base.id, encoder->name);
				return -EINVAL;
			}
		}
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		const struct drm_crtc_helper_funcs *funcs;

		if (!new_crtc_state->enable)
			continue;

		if (!new_crtc_state->mode_changed &&
		    !new_crtc_state->connectors_changed)
			continue;

		funcs = crtc->helper_private;
		if (!funcs->mode_fixup)
			continue;

		ret = funcs->mode_fixup(crtc, &new_crtc_state->mode,
					&new_crtc_state->adjusted_mode);
		if (!ret) {
			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] fixup failed\n",
					 crtc->base.id, crtc->name);
			return -EINVAL;
		}
	}

	return 0;
}
488
489static enum drm_mode_status mode_valid_path(struct drm_connector *connector,
490 struct drm_encoder *encoder,
491 struct drm_crtc *crtc,
492 struct drm_display_mode *mode)
493{
494 enum drm_mode_status ret;
495
496 ret = drm_encoder_mode_valid(encoder, mode);
497 if (ret != MODE_OK) {
498 DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] mode_valid() failed\n",
499 encoder->base.id, encoder->name);
500 return ret;
501 }
502
503 ret = drm_bridge_mode_valid(encoder->bridge, mode);
504 if (ret != MODE_OK) {
505 DRM_DEBUG_ATOMIC("[BRIDGE] mode_valid() failed\n");
506 return ret;
507 }
508
509 ret = drm_crtc_mode_valid(crtc, mode);
510 if (ret != MODE_OK) {
511 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode_valid() failed\n",
512 crtc->base.id, crtc->name);
513 return ret;
514 }
515
516 return ret;
517}
518
519static int
520mode_valid(struct drm_atomic_state *state)
521{
522 struct drm_connector_state *conn_state;
523 struct drm_connector *connector;
524 int i;
525
526 for_each_new_connector_in_state(state, connector, conn_state, i) {
527 struct drm_encoder *encoder = conn_state->best_encoder;
528 struct drm_crtc *crtc = conn_state->crtc;
529 struct drm_crtc_state *crtc_state;
530 enum drm_mode_status mode_status;
531 struct drm_display_mode *mode;
532
533 if (!crtc || !encoder)
534 continue;
535
536 crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
537 if (!crtc_state)
538 continue;
539 if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
540 continue;
541
542 mode = &crtc_state->mode;
543
544 mode_status = mode_valid_path(connector, encoder, crtc, mode);
545 if (mode_status != MODE_OK)
546 return -EINVAL;
547 }
548
549 return 0;
550}
551
552/**
553 * drm_atomic_helper_check_modeset - validate state object for modeset changes
554 * @dev: DRM device
555 * @state: the driver state object
556 *
557 * Check the state object to see if the requested state is physically possible.
558 * This does all the crtc and connector related computations for an atomic
559 * update and adds any additional connectors needed for full modesets. It calls
560 * the various per-object callbacks in the follow order:
561 *
562 * 1. &drm_connector_helper_funcs.atomic_best_encoder for determining the new encoder.
563 * 2. &drm_connector_helper_funcs.atomic_check to validate the connector state.
564 * 3. If it's determined a modeset is needed then all connectors on the affected
565 * crtc are added and &drm_connector_helper_funcs.atomic_check is run on them.
566 * 4. &drm_encoder_helper_funcs.mode_valid, &drm_bridge_funcs.mode_valid and
567 * &drm_crtc_helper_funcs.mode_valid are called on the affected components.
568 * 5. &drm_bridge_funcs.mode_fixup is called on all encoder bridges.
569 * 6. &drm_encoder_helper_funcs.atomic_check is called to validate any encoder state.
570 * This function is only called when the encoder will be part of a configured crtc,
571 * it must not be used for implementing connector property validation.
572 * If this function is NULL, &drm_atomic_encoder_helper_funcs.mode_fixup is called
573 * instead.
574 * 7. &drm_crtc_helper_funcs.mode_fixup is called last, to fix up the mode with crtc constraints.
575 *
576 * &drm_crtc_state.mode_changed is set when the input mode is changed.
577 * &drm_crtc_state.connectors_changed is set when a connector is added or
578 * removed from the crtc. &drm_crtc_state.active_changed is set when
579 * &drm_crtc_state.active changes, which is used for DPMS.
580 * See also: drm_atomic_crtc_needs_modeset()
581 *
582 * IMPORTANT:
583 *
584 * Drivers which set &drm_crtc_state.mode_changed (e.g. in their
585 * &drm_plane_helper_funcs.atomic_check hooks if a plane update can't be done
586 * without a full modeset) _must_ call this function afterwards after that
587 * change. It is permitted to call this function multiple times for the same
588 * update, e.g. when the &drm_crtc_helper_funcs.atomic_check functions depend
589 * upon the adjusted dotclock for fifo space allocation and watermark
590 * computation.
591 *
592 * RETURNS:
593 * Zero for success or -errno
594 */
595int
596drm_atomic_helper_check_modeset(struct drm_device *dev,
597 struct drm_atomic_state *state)
598{
599 struct drm_crtc *crtc;
600 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
601 struct drm_connector *connector;
602 struct drm_connector_state *old_connector_state, *new_connector_state;
603 int i, ret;
604 unsigned connectors_mask = 0;
605
606 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
607 bool has_connectors =
608 !!new_crtc_state->connector_mask;
609
610 WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
611
612 if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) {
613 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n",
614 crtc->base.id, crtc->name);
615 new_crtc_state->mode_changed = true;
616 }
617
618 if (old_crtc_state->enable != new_crtc_state->enable) {
619 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enable changed\n",
620 crtc->base.id, crtc->name);
621
622 /*
623 * For clarity this assignment is done here, but
624 * enable == 0 is only true when there are no
625 * connectors and a NULL mode.
626 *
627 * The other way around is true as well. enable != 0
628 * iff connectors are attached and a mode is set.
629 */
630 new_crtc_state->mode_changed = true;
631 new_crtc_state->connectors_changed = true;
632 }
633
634 if (old_crtc_state->active != new_crtc_state->active) {
635 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active changed\n",
636 crtc->base.id, crtc->name);
637 new_crtc_state->active_changed = true;
638 }
639
640 if (new_crtc_state->enable != has_connectors) {
641 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled/connectors mismatch\n",
642 crtc->base.id, crtc->name);
643
644 return -EINVAL;
645 }
646 }
647
648 ret = handle_conflicting_encoders(state, false);
649 if (ret)
650 return ret;
651
652 for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
653 const struct drm_connector_helper_funcs *funcs = connector->helper_private;
654
655 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
656
657 /*
658 * This only sets crtc->connectors_changed for routing changes,
659 * drivers must set crtc->connectors_changed themselves when
660 * connector properties need to be updated.
661 */
662 ret = update_connector_routing(state, connector,
663 old_connector_state,
664 new_connector_state);
665 if (ret)
666 return ret;
667 if (old_connector_state->crtc) {
668 new_crtc_state = drm_atomic_get_new_crtc_state(state,
669 old_connector_state->crtc);
670 if (old_connector_state->link_status !=
671 new_connector_state->link_status)
672 new_crtc_state->connectors_changed = true;
673
674 if (old_connector_state->max_requested_bpc !=
675 new_connector_state->max_requested_bpc)
676 new_crtc_state->connectors_changed = true;
677 }
678
679 if (funcs->atomic_check)
680 ret = funcs->atomic_check(connector, new_connector_state);
681 if (ret)
682 return ret;
683
684 connectors_mask |= BIT(i);
685 }
686
687 /*
688 * After all the routing has been prepared we need to add in any
689 * connector which is itself unchanged, but who's crtc changes it's
690 * configuration. This must be done before calling mode_fixup in case a
691 * crtc only changed its mode but has the same set of connectors.
692 */
693 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
694 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
695 continue;
696
697 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] needs all connectors, enable: %c, active: %c\n",
698 crtc->base.id, crtc->name,
699 new_crtc_state->enable ? 'y' : 'n',
700 new_crtc_state->active ? 'y' : 'n');
701
702 ret = drm_atomic_add_affected_connectors(state, crtc);
703 if (ret != 0)
704 return ret;
705
706 ret = drm_atomic_add_affected_planes(state, crtc);
707 if (ret != 0)
708 return ret;
709 }
710
711 /*
712 * Iterate over all connectors again, to make sure atomic_check()
713 * has been called on them when a modeset is forced.
714 */
715 for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
716 const struct drm_connector_helper_funcs *funcs = connector->helper_private;
717
718 if (connectors_mask & BIT(i))
719 continue;
720
721 if (funcs->atomic_check)
722 ret = funcs->atomic_check(connector, new_connector_state);
723 if (ret)
724 return ret;
725 }
726
727 ret = mode_valid(state);
728 if (ret)
729 return ret;
730
731 return mode_fixup(state);
732}
733EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
734
735/**
736 * drm_atomic_helper_check_plane_state() - Check plane state for validity
737 * @plane_state: plane state to check
738 * @crtc_state: crtc state to check
739 * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
740 * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
741 * @can_position: is it legal to position the plane such that it
742 * doesn't cover the entire crtc? This will generally
743 * only be false for primary planes.
744 * @can_update_disabled: can the plane be updated while the crtc
745 * is disabled?
746 *
747 * Checks that a desired plane update is valid, and updates various
748 * bits of derived state (clipped coordinates etc.). Drivers that provide
749 * their own plane handling rather than helper-provided implementations may
750 * still wish to call this function to avoid duplication of error checking
751 * code.
752 *
753 * RETURNS:
754 * Zero if update appears valid, error code on failure
755 */
int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
					const struct drm_crtc_state *crtc_state,
					int min_scale,
					int max_scale,
					bool can_position,
					bool can_update_disabled)
{
	struct drm_framebuffer *fb = plane_state->fb;
	struct drm_rect *src = &plane_state->src;
	struct drm_rect *dst = &plane_state->dst;
	unsigned int rotation = plane_state->rotation;
	struct drm_rect clip = {};
	int hscale, vscale;

	/* @crtc_state must belong to the plane's crtc (if any). */
	WARN_ON(plane_state->crtc && plane_state->crtc != crtc_state->crtc);

	/* Derive the clipped src/dst rectangles from the requested state. */
	*src = drm_plane_state_src(plane_state);
	*dst = drm_plane_state_dest(plane_state);

	if (!fb) {
		/* No framebuffer means the plane is being disabled. */
		plane_state->visible = false;
		return 0;
	}

	/* crtc should only be NULL when disabling (i.e., !fb) */
	if (WARN_ON(!plane_state->crtc)) {
		plane_state->visible = false;
		return 0;
	}

	if (!crtc_state->enable && !can_update_disabled) {
		DRM_DEBUG_KMS("Cannot update plane of a disabled CRTC.\n");
		return -EINVAL;
	}

	/* Rotate src into framebuffer coordinates (16.16 fixed point) so
	 * scaling and clipping operate in scanout orientation. */
	drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation);

	/* Check scaling */
	hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
	vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
	if (hscale < 0 || vscale < 0) {
		DRM_DEBUG_KMS("Invalid scaling of plane\n");
		drm_rect_debug_print("src: ", &plane_state->src, true);
		drm_rect_debug_print("dst: ", &plane_state->dst, false);
		return -ERANGE;
	}

	/* clip stays empty (all zero) when the crtc is disabled, which
	 * makes the plane invisible below. */
	if (crtc_state->enable)
		drm_mode_get_hv_timing(&crtc_state->mode, &clip.x2, &clip.y2);

	plane_state->visible = drm_rect_clip_scaled(src, dst, &clip);

	/* Undo the rotation so src is back in user coordinates. */
	drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);

	if (!plane_state->visible)
		/*
		 * Plane isn't visible; some drivers can handle this
		 * so we just return success here. Drivers that can't
		 * (including those that use the primary plane helper's
		 * update function) will return an error from their
		 * update_plane handler.
		 */
		return 0;

	if (!can_position && !drm_rect_equals(dst, &clip)) {
		DRM_DEBUG_KMS("Plane must cover entire CRTC\n");
		drm_rect_debug_print("dst: ", dst, false);
		drm_rect_debug_print("clip: ", &clip, false);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_helper_check_plane_state);
830
831/**
832 * drm_atomic_helper_check_planes - validate state object for planes changes
833 * @dev: DRM device
834 * @state: the driver state object
835 *
836 * Check the state object to see if the requested state is physically possible.
837 * This does all the plane update related checks using by calling into the
838 * &drm_crtc_helper_funcs.atomic_check and &drm_plane_helper_funcs.atomic_check
839 * hooks provided by the driver.
840 *
841 * It also sets &drm_crtc_state.planes_changed to indicate that a crtc has
842 * updated planes.
843 *
844 * RETURNS:
845 * Zero for success or -errno
846 */
847int
848drm_atomic_helper_check_planes(struct drm_device *dev,
849 struct drm_atomic_state *state)
850{
851 struct drm_crtc *crtc;
852 struct drm_crtc_state *new_crtc_state;
853 struct drm_plane *plane;
854 struct drm_plane_state *new_plane_state, *old_plane_state;
855 int i, ret = 0;
856
857 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
858 const struct drm_plane_helper_funcs *funcs;
859
860 WARN_ON(!drm_modeset_is_locked(&plane->mutex));
861
862 funcs = plane->helper_private;
863
864 drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane);
865
866 drm_atomic_helper_check_plane_damage(state, new_plane_state);
867
868 if (!funcs || !funcs->atomic_check)
869 continue;
870
871 ret = funcs->atomic_check(plane, new_plane_state);
872 if (ret) {
873 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
874 plane->base.id, plane->name);
875 return ret;
876 }
877 }
878
879 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
880 const struct drm_crtc_helper_funcs *funcs;
881
882 funcs = crtc->helper_private;
883
884 if (!funcs || !funcs->atomic_check)
885 continue;
886
887 ret = funcs->atomic_check(crtc, new_crtc_state);
888 if (ret) {
889 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
890 crtc->base.id, crtc->name);
891 return ret;
892 }
893 }
894
895 return ret;
896}
897EXPORT_SYMBOL(drm_atomic_helper_check_planes);
898
899/**
900 * drm_atomic_helper_check - validate state object
901 * @dev: DRM device
902 * @state: the driver state object
903 *
904 * Check the state object to see if the requested state is physically possible.
905 * Only crtcs and planes have check callbacks, so for any additional (global)
906 * checking that a driver needs it can simply wrap that around this function.
907 * Drivers without such needs can directly use this as their
908 * &drm_mode_config_funcs.atomic_check callback.
909 *
910 * This just wraps the two parts of the state checking for planes and modeset
911 * state in the default order: First it calls drm_atomic_helper_check_modeset()
912 * and then drm_atomic_helper_check_planes(). The assumption is that the
913 * &drm_plane_helper_funcs.atomic_check and &drm_crtc_helper_funcs.atomic_check
914 * functions depend upon an updated adjusted_mode.clock to e.g. properly compute
915 * watermarks.
916 *
917 * Note that zpos normalization will add all enabled planes to the state which
918 * might not be desired for some drivers.
919 * For example enable/disable of a cursor plane which has a fixed zpos value
920 * would trigger all other enabled planes to be forced to the state change.
921 *
922 * RETURNS:
923 * Zero for success or -errno
924 */
925int drm_atomic_helper_check(struct drm_device *dev,
926 struct drm_atomic_state *state)
927{
928 int ret;
929
930 ret = drm_atomic_helper_check_modeset(dev, state);
931 if (ret)
932 return ret;
933
934 if (dev->mode_config.normalize_zpos) {
935 ret = drm_atomic_normalize_zpos(dev, state);
936 if (ret)
937 return ret;
938 }
939
940 ret = drm_atomic_helper_check_planes(dev, state);
941 if (ret)
942 return ret;
943
944 if (state->legacy_cursor_update)
945 state->async_update = !drm_atomic_helper_async_check(dev, state);
946
947 return ret;
948}
949EXPORT_SYMBOL(drm_atomic_helper_check);
950
/*
 * Shut down every output that is switched off or undergoing a full modeset,
 * invoking the driver hooks in the order the hardware requires: bridges and
 * encoders first, then the CRTCs feeding them.
 */
static void
disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
{
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int i;

	for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) {
		const struct drm_encoder_helper_funcs *funcs;
		struct drm_encoder *encoder;

		/* Shut down everything that's in the changeset and currently
		 * still on. So need to check the old, saved state. */
		if (!old_conn_state->crtc)
			continue;

		old_crtc_state = drm_atomic_get_old_crtc_state(old_state, old_conn_state->crtc);

		/* Skip outputs that were already off or don't need a modeset. */
		if (!old_crtc_state->active ||
		    !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
			continue;

		encoder = old_conn_state->best_encoder;

		/* We shouldn't get this far if we didn't previously have
		 * an encoder.. but WARN_ON() rather than explode.
		 */
		if (WARN_ON(!encoder))
			continue;

		funcs = encoder->helper_private;

		DRM_DEBUG_ATOMIC("disabling [ENCODER:%d:%s]\n",
				 encoder->base.id, encoder->name);

		/*
		 * Each encoder has at most one connector (since we always steal
		 * it away), so we won't call disable hooks twice.
		 */
		drm_bridge_disable(encoder->bridge);

		/* Right function depends upon target state. */
		if (funcs) {
			/*
			 * ->prepare() is only used when the encoder stays in
			 * use after this commit; otherwise fall through from
			 * ->disable() to the legacy ->dpms() hook.
			 */
			if (new_conn_state->crtc && funcs->prepare)
				funcs->prepare(encoder);
			else if (funcs->disable)
				funcs->disable(encoder);
			else if (funcs->dpms)
				funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
		}

		drm_bridge_post_disable(encoder->bridge);
	}

	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
		const struct drm_crtc_helper_funcs *funcs;
		int ret;

		/* Shut down everything that needs a full modeset. */
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		if (!old_crtc_state->active)
			continue;

		funcs = crtc->helper_private;

		DRM_DEBUG_ATOMIC("disabling [CRTC:%d:%s]\n",
				 crtc->base.id, crtc->name);


		/* Right function depends upon target state. */
		if (new_crtc_state->enable && funcs->prepare)
			funcs->prepare(crtc);
		else if (funcs->atomic_disable)
			funcs->atomic_disable(crtc, old_crtc_state);
		else if (funcs->disable)
			funcs->disable(crtc);
		else
			funcs->dpms(crtc, DRM_MODE_DPMS_OFF);

		/* Sanity check: with vblank support, the driver must have
		 * turned vblanks off by now, see WARN_ONCE below. */
		if (!(dev->irq_enabled && dev->num_crtcs))
			continue;

		ret = drm_crtc_vblank_get(crtc);
		WARN_ONCE(ret != -EINVAL, "driver forgot to call drm_crtc_vblank_off()\n");
		if (ret == 0)
			drm_crtc_vblank_put(crtc);
	}
}
1043
1044/**
1045 * drm_atomic_helper_update_legacy_modeset_state - update legacy modeset state
1046 * @dev: DRM device
1047 * @old_state: atomic state object with old state structures
1048 *
1049 * This function updates all the various legacy modeset state pointers in
1050 * connectors, encoders and crtcs. It also updates the timestamping constants
1051 * used for precise vblank timestamps by calling
1052 * drm_calc_timestamping_constants().
1053 *
1054 * Drivers can use this for building their own atomic commit if they don't have
1055 * a pure helper-based modeset implementation.
1056 *
1057 * Since these updates are not synchronized with lockings, only code paths
1058 * called from &drm_mode_config_helper_funcs.atomic_commit_tail can look at the
 * legacy state filled out by this helper. De facto this means this helper and
1060 * the legacy state pointers are only really useful for transitioning an
1061 * existing driver to the atomic world.
1062 */
1063void
1064drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
1065 struct drm_atomic_state *old_state)
1066{
1067 struct drm_connector *connector;
1068 struct drm_connector_state *old_conn_state, *new_conn_state;
1069 struct drm_crtc *crtc;
1070 struct drm_crtc_state *new_crtc_state;
1071 int i;
1072
1073 /* clear out existing links and update dpms */
1074 for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) {
1075 if (connector->encoder) {
1076 WARN_ON(!connector->encoder->crtc);
1077
1078 connector->encoder->crtc = NULL;
1079 connector->encoder = NULL;
1080 }
1081
1082 crtc = new_conn_state->crtc;
1083 if ((!crtc && old_conn_state->crtc) ||
1084 (crtc && drm_atomic_crtc_needs_modeset(crtc->state))) {
1085 int mode = DRM_MODE_DPMS_OFF;
1086
1087 if (crtc && crtc->state->active)
1088 mode = DRM_MODE_DPMS_ON;
1089
1090 connector->dpms = mode;
1091 }
1092 }
1093
1094 /* set new links */
1095 for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
1096 if (!new_conn_state->crtc)
1097 continue;
1098
1099 if (WARN_ON(!new_conn_state->best_encoder))
1100 continue;
1101
1102 connector->encoder = new_conn_state->best_encoder;
1103 connector->encoder->crtc = new_conn_state->crtc;
1104 }
1105
1106 /* set legacy state in the crtc structure */
1107 for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
1108 struct drm_plane *primary = crtc->primary;
1109 struct drm_plane_state *new_plane_state;
1110
1111 crtc->mode = new_crtc_state->mode;
1112 crtc->enabled = new_crtc_state->enable;
1113
1114 new_plane_state =
1115 drm_atomic_get_new_plane_state(old_state, primary);
1116
1117 if (new_plane_state && new_plane_state->crtc == crtc) {
1118 crtc->x = new_plane_state->src_x >> 16;
1119 crtc->y = new_plane_state->src_y >> 16;
1120 }
1121
1122 if (new_crtc_state->enable)
1123 drm_calc_timestamping_constants(crtc,
1124 &new_crtc_state->adjusted_mode);
1125 }
1126}
1127EXPORT_SYMBOL(drm_atomic_helper_update_legacy_modeset_state);
1128
/*
 * Program the new display mode: first on every CRTC with a changed mode (via
 * ->mode_set_nofb()), then on every encoder feeding such a CRTC (via
 * ->atomic_mode_set()/->mode_set() and the attached bridge chain).
 */
static void
crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;

	for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
		const struct drm_crtc_helper_funcs *funcs;

		if (!new_crtc_state->mode_changed)
			continue;

		funcs = crtc->helper_private;

		if (new_crtc_state->enable && funcs->mode_set_nofb) {
			DRM_DEBUG_ATOMIC("modeset on [CRTC:%d:%s]\n",
					 crtc->base.id, crtc->name);

			funcs->mode_set_nofb(crtc);
		}
	}

	for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
		const struct drm_encoder_helper_funcs *funcs;
		struct drm_encoder *encoder;
		struct drm_display_mode *mode, *adjusted_mode;

		if (!new_conn_state->best_encoder)
			continue;

		encoder = new_conn_state->best_encoder;
		funcs = encoder->helper_private;
		new_crtc_state = new_conn_state->crtc->state;
		mode = &new_crtc_state->mode;
		adjusted_mode = &new_crtc_state->adjusted_mode;

		/* Only encoders whose CRTC got a new mode need programming. */
		if (!new_crtc_state->mode_changed)
			continue;

		DRM_DEBUG_ATOMIC("modeset on [ENCODER:%d:%s]\n",
				 encoder->base.id, encoder->name);

		/*
		 * Each encoder has at most one connector (since we always steal
		 * it away), so we won't call mode_set hooks twice.
		 */
		if (funcs && funcs->atomic_mode_set) {
			/* Atomic hook is preferred; it also sees the connector state. */
			funcs->atomic_mode_set(encoder, new_crtc_state,
					       new_conn_state);
		} else if (funcs && funcs->mode_set) {
			funcs->mode_set(encoder, mode, adjusted_mode);
		}

		drm_bridge_mode_set(encoder->bridge, mode, adjusted_mode);
	}
}
1188
1189/**
1190 * drm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
1191 * @dev: DRM device
1192 * @old_state: atomic state object with old state structures
1193 *
1194 * This function shuts down all the outputs that need to be shut down and
1195 * prepares them (if required) with the new mode.
1196 *
1197 * For compatibility with legacy crtc helpers this should be called before
1198 * drm_atomic_helper_commit_planes(), which is what the default commit function
1199 * does. But drivers with different needs can group the modeset commits together
1200 * and do the plane commits at the end. This is useful for drivers doing runtime
1201 * PM since planes updates then only happen when the CRTC is actually enabled.
1202 */
void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
					       struct drm_atomic_state *old_state)
{
	/* Turn off every encoder/CRTC that needs a full modeset. */
	disable_outputs(dev, old_state);

	/* Sync the legacy state pointers while the outputs are off. */
	drm_atomic_helper_update_legacy_modeset_state(dev, old_state);

	/* Program the new mode, without enabling anything yet. */
	crtc_set_mode(dev, old_state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
1213
1214static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
1215 struct drm_atomic_state *old_state)
1216{
1217 struct drm_connector *connector;
1218 struct drm_connector_state *new_conn_state;
1219 int i;
1220
1221 for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
1222 const struct drm_connector_helper_funcs *funcs;
1223
1224 funcs = connector->helper_private;
1225 if (!funcs->atomic_commit)
1226 continue;
1227
1228 if (new_conn_state->writeback_job && new_conn_state->writeback_job->fb) {
1229 WARN_ON(connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
1230 funcs->atomic_commit(connector, new_conn_state);
1231 }
1232 }
1233}
1234
1235/**
1236 * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
1237 * @dev: DRM device
1238 * @old_state: atomic state object with old state structures
1239 *
1240 * This function enables all the outputs with the new configuration which had to
1241 * be turned off for the update.
1242 *
1243 * For compatibility with legacy crtc helpers this should be called after
1244 * drm_atomic_helper_commit_planes(), which is what the default commit function
1245 * does. But drivers with different needs can group the modeset commits together
1246 * and do the plane commits at the end. This is useful for drivers doing runtime
1247 * PM since planes updates then only happen when the CRTC is actually enabled.
1248 */
void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
					      struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	struct drm_crtc_state *new_crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;

	/* Enable CRTCs first, then the encoders/bridges they feed. */
	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
		const struct drm_crtc_helper_funcs *funcs;

		/* Need to filter out CRTCs where only planes change. */
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		if (!new_crtc_state->active)
			continue;

		funcs = crtc->helper_private;

		if (new_crtc_state->enable) {
			DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n",
					 crtc->base.id, crtc->name);

			/* Prefer the atomic hook, fall back to legacy ->commit(). */
			if (funcs->atomic_enable)
				funcs->atomic_enable(crtc, old_crtc_state);
			else
				funcs->commit(crtc);
		}
	}

	for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
		const struct drm_encoder_helper_funcs *funcs;
		struct drm_encoder *encoder;

		if (!new_conn_state->best_encoder)
			continue;

		/* Only touch encoders on active CRTCs undergoing a modeset. */
		if (!new_conn_state->crtc->state->active ||
		    !drm_atomic_crtc_needs_modeset(new_conn_state->crtc->state))
			continue;

		encoder = new_conn_state->best_encoder;
		funcs = encoder->helper_private;

		DRM_DEBUG_ATOMIC("enabling [ENCODER:%d:%s]\n",
				 encoder->base.id, encoder->name);

		/*
		 * Each encoder has at most one connector (since we always steal
		 * it away), so we won't call enable hooks twice.
		 */
		drm_bridge_pre_enable(encoder->bridge);

		if (funcs) {
			if (funcs->enable)
				funcs->enable(encoder);
			else if (funcs->commit)
				funcs->commit(encoder);
		}

		drm_bridge_enable(encoder->bridge);
	}

	/* Finally kick off any pending writeback jobs. */
	drm_atomic_helper_commit_writebacks(dev, old_state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
1318
1319/**
1320 * drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state
1321 * @dev: DRM device
1322 * @state: atomic state object with old state structures
1323 * @pre_swap: If true, do an interruptible wait, and @state is the new state.
1324 * Otherwise @state is the old state.
1325 *
1326 * For implicit sync, driver should fish the exclusive fence out from the
1327 * incoming fb's and stash it in the drm_plane_state. This is called after
1328 * drm_atomic_helper_swap_state() so it uses the current plane state (and
1329 * just uses the atomic state to find the changed planes)
1330 *
1331 * Note that @pre_swap is needed since the point where we block for fences moves
1332 * around depending upon whether an atomic commit is blocking or
1333 * non-blocking. For non-blocking commit all waiting needs to happen after
1334 * drm_atomic_helper_swap_state() is called, but for blocking commits we want
1335 * to wait **before** we do anything that can't be easily rolled back. That is
1336 * before we call drm_atomic_helper_swap_state().
1337 *
1338 * Returns zero if success or < 0 if dma_fence_wait() fails.
1339 */
1340int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
1341 struct drm_atomic_state *state,
1342 bool pre_swap)
1343{
1344 struct drm_plane *plane;
1345 struct drm_plane_state *new_plane_state;
1346 int i, ret;
1347
1348 for_each_new_plane_in_state(state, plane, new_plane_state, i) {
1349 if (!new_plane_state->fence)
1350 continue;
1351
1352 WARN_ON(!new_plane_state->fb);
1353
1354 /*
1355 * If waiting for fences pre-swap (ie: nonblock), userspace can
1356 * still interrupt the operation. Instead of blocking until the
1357 * timer expires, make the wait interruptible.
1358 */
1359 ret = dma_fence_wait(new_plane_state->fence, pre_swap);
1360 if (ret)
1361 return ret;
1362
1363 dma_fence_put(new_plane_state->fence);
1364 new_plane_state->fence = NULL;
1365 }
1366
1367 return 0;
1368}
1369EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);
1370
1371/**
1372 * drm_atomic_helper_wait_for_vblanks - wait for vblank on crtcs
1373 * @dev: DRM device
1374 * @old_state: atomic state object with old state structures
1375 *
 * Helper to, after atomic commit, wait for vblanks on all affected
1377 * crtcs (ie. before cleaning up old framebuffers using
1378 * drm_atomic_helper_cleanup_planes()). It will only wait on CRTCs where the
1379 * framebuffers have actually changed to optimize for the legacy cursor and
1380 * plane update use-case.
1381 *
1382 * Drivers using the nonblocking commit tracking support initialized by calling
1383 * drm_atomic_helper_setup_commit() should look at
1384 * drm_atomic_helper_wait_for_flip_done() as an alternative.
1385 */
void
drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
				   struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int i, ret;
	/* Bitmask of CRTCs we successfully took a vblank reference on. */
	unsigned crtc_mask = 0;

	/*
	 * Legacy cursor ioctls are completely unsynced, and userspace
	 * relies on that (by doing tons of cursor updates).
	 */
	if (old_state->legacy_cursor_update)
		return;

	/* First pass: grab a vblank reference and snapshot the counter. */
	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!new_crtc_state->active)
			continue;

		/* vblank_get can fail (e.g. vblanks disabled); skip then. */
		ret = drm_crtc_vblank_get(crtc);
		if (ret != 0)
			continue;

		crtc_mask |= drm_crtc_mask(crtc);
		old_state->crtcs[i].last_vblank_count = drm_crtc_vblank_count(crtc);
	}

	/* Second pass: wait for each snapshotted counter to advance. */
	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
		if (!(crtc_mask & drm_crtc_mask(crtc)))
			continue;

		ret = wait_event_timeout(dev->vblank[i].queue,
				old_state->crtcs[i].last_vblank_count !=
					drm_crtc_vblank_count(crtc),
				msecs_to_jiffies(50));

		WARN(!ret, "[CRTC:%d:%s] vblank wait timed out\n",
		     crtc->base.id, crtc->name);

		/* Balance the reference taken in the first pass. */
		drm_crtc_vblank_put(crtc);
	}
}
EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
1430
1431/**
1432 * drm_atomic_helper_wait_for_flip_done - wait for all page flips to be done
1433 * @dev: DRM device
1434 * @old_state: atomic state object with old state structures
1435 *
 * Helper to, after atomic commit, wait for page flips on all affected
1437 * crtcs (ie. before cleaning up old framebuffers using
1438 * drm_atomic_helper_cleanup_planes()). Compared to
 * drm_atomic_helper_wait_for_vblanks() this waits for the completion on all
1440 * CRTCs, assuming that cursors-only updates are signalling their completion
1441 * immediately (or using a different path).
1442 *
1443 * This requires that drivers use the nonblocking commit tracking support
1444 * initialized using drm_atomic_helper_setup_commit().
1445 */
1446void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
1447 struct drm_atomic_state *old_state)
1448{
1449 struct drm_crtc *crtc;
1450 int i;
1451
1452 for (i = 0; i < dev->mode_config.num_crtc; i++) {
1453 struct drm_crtc_commit *commit = old_state->crtcs[i].commit;
1454 int ret;
1455
1456 crtc = old_state->crtcs[i].ptr;
1457
1458 if (!crtc || !commit)
1459 continue;
1460
1461 ret = wait_for_completion_timeout(&commit->flip_done, 10 * HZ);
1462 if (ret == 0)
1463 DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
1464 crtc->base.id, crtc->name);
1465 }
1466
1467 if (old_state->fake_commit)
1468 complete_all(&old_state->fake_commit->flip_done);
1469}
1470EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done);
1471
1472/**
1473 * drm_atomic_helper_commit_tail - commit atomic update to hardware
1474 * @old_state: atomic state object with old state structures
1475 *
1476 * This is the default implementation for the
1477 * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
1478 * that do not support runtime_pm or do not need the CRTC to be
1479 * enabled to perform a commit. Otherwise, see
1480 * drm_atomic_helper_commit_tail_rpm().
1481 *
1482 * Note that the default ordering of how the various stages are called is to
1483 * match the legacy modeset helper library closest.
1484 */
void drm_atomic_helper_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;

	/* Legacy-helper ordering: disable, planes, then enable. */
	drm_atomic_helper_commit_modeset_disables(dev, old_state);

	drm_atomic_helper_commit_planes(dev, old_state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, old_state);

	drm_atomic_helper_fake_vblank(old_state);

	/* Hardware is committed; signal hw_done before the vblank wait. */
	drm_atomic_helper_commit_hw_done(old_state);

	drm_atomic_helper_wait_for_vblanks(dev, old_state);

	/* Old framebuffers can be cleaned up only after the vblank wait. */
	drm_atomic_helper_cleanup_planes(dev, old_state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_tail);
1504
1505/**
1506 * drm_atomic_helper_commit_tail_rpm - commit atomic update to hardware
1507 * @old_state: new modeset state to be committed
1508 *
1509 * This is an alternative implementation for the
1510 * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
1511 * that support runtime_pm or need the CRTC to be enabled to perform a
1512 * commit. Otherwise, one should use the default implementation
1513 * drm_atomic_helper_commit_tail().
1514 */
void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;

	drm_atomic_helper_commit_modeset_disables(dev, old_state);

	/*
	 * Unlike drm_atomic_helper_commit_tail(), enable CRTCs before
	 * committing planes, and only commit planes on active CRTCs --
	 * the runtime-PM friendly ordering.
	 */
	drm_atomic_helper_commit_modeset_enables(dev, old_state);

	drm_atomic_helper_commit_planes(dev, old_state,
					DRM_PLANE_COMMIT_ACTIVE_ONLY);

	drm_atomic_helper_fake_vblank(old_state);

	/* Hardware is committed; signal hw_done before the vblank wait. */
	drm_atomic_helper_commit_hw_done(old_state);

	drm_atomic_helper_wait_for_vblanks(dev, old_state);

	/* Old framebuffers can be cleaned up only after the vblank wait. */
	drm_atomic_helper_cleanup_planes(dev, old_state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_tail_rpm);
1535
1536static void commit_tail(struct drm_atomic_state *old_state)
1537{
1538 struct drm_device *dev = old_state->dev;
1539 const struct drm_mode_config_helper_funcs *funcs;
1540
1541 funcs = dev->mode_config.helper_private;
1542
1543 drm_atomic_helper_wait_for_fences(dev, old_state, false);
1544
1545 drm_atomic_helper_wait_for_dependencies(old_state);
1546
1547 if (funcs && funcs->atomic_commit_tail)
1548 funcs->atomic_commit_tail(old_state);
1549 else
1550 drm_atomic_helper_commit_tail(old_state);
1551
1552 drm_atomic_helper_commit_cleanup_done(old_state);
1553
1554 drm_atomic_state_put(old_state);
1555}
1556
1557static void commit_work(struct work_struct *work)
1558{
1559 struct drm_atomic_state *state = container_of(work,
1560 struct drm_atomic_state,
1561 commit_work);
1562 commit_tail(state);
1563}
1564
1565/**
1566 * drm_atomic_helper_async_check - check if state can be commited asynchronously
1567 * @dev: DRM device
1568 * @state: the driver state object
1569 *
1570 * This helper will check if it is possible to commit the state asynchronously.
1571 * Async commits are not supposed to swap the states like normal sync commits
1572 * but just do in-place changes on the current state.
1573 *
1574 * It will return 0 if the commit can happen in an asynchronous fashion or error
1575 * if not. Note that error just mean it can't be commited asynchronously, if it
1576 * fails the commit should be treated like a normal synchronous commit.
1577 */
1578int drm_atomic_helper_async_check(struct drm_device *dev,
1579 struct drm_atomic_state *state)
1580{
1581 struct drm_crtc *crtc;
1582 struct drm_crtc_state *crtc_state;
1583 struct drm_plane *plane = NULL;
1584 struct drm_plane_state *old_plane_state = NULL;
1585 struct drm_plane_state *new_plane_state = NULL;
1586 const struct drm_plane_helper_funcs *funcs;
1587 int i, n_planes = 0;
1588
1589 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1590 if (drm_atomic_crtc_needs_modeset(crtc_state))
1591 return -EINVAL;
1592 }
1593
1594 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
1595 n_planes++;
1596
1597 /* FIXME: we support only single plane updates for now */
1598 if (n_planes != 1)
1599 return -EINVAL;
1600
1601 if (!new_plane_state->crtc ||
1602 old_plane_state->crtc != new_plane_state->crtc)
1603 return -EINVAL;
1604
1605 funcs = plane->helper_private;
1606 if (!funcs->atomic_async_update)
1607 return -EINVAL;
1608
1609 if (new_plane_state->fence)
1610 return -EINVAL;
1611
1612 /*
1613 * Don't do an async update if there is an outstanding commit modifying
1614 * the plane. This prevents our async update's changes from getting
1615 * overridden by a previous synchronous update's state.
1616 */
1617 if (old_plane_state->commit &&
1618 !try_wait_for_completion(&old_plane_state->commit->hw_done))
1619 return -EBUSY;
1620
1621 return funcs->atomic_async_check(plane, new_plane_state);
1622}
1623EXPORT_SYMBOL(drm_atomic_helper_async_check);
1624
1625/**
1626 * drm_atomic_helper_async_commit - commit state asynchronously
1627 * @dev: DRM device
1628 * @state: the driver state object
1629 *
1630 * This function commits a state asynchronously, i.e., not vblank
1631 * synchronized. It should be used on a state only when
 * drm_atomic_helper_async_check() succeeds. Async commits are not supposed to swap
1633 * the states like normal sync commits, but just do in-place changes on the
1634 * current state.
1635 */
1636void drm_atomic_helper_async_commit(struct drm_device *dev,
1637 struct drm_atomic_state *state)
1638{
1639 struct drm_plane *plane;
1640 struct drm_plane_state *plane_state;
1641 const struct drm_plane_helper_funcs *funcs;
1642 int i;
1643
1644 for_each_new_plane_in_state(state, plane, plane_state, i) {
1645 funcs = plane->helper_private;
1646 funcs->atomic_async_update(plane, plane_state);
1647
1648 /*
1649 * ->atomic_async_update() is supposed to update the
1650 * plane->state in-place, make sure at least common
1651 * properties have been properly updated.
1652 */
1653 WARN_ON_ONCE(plane->state->fb != plane_state->fb);
1654 WARN_ON_ONCE(plane->state->crtc_x != plane_state->crtc_x);
1655 WARN_ON_ONCE(plane->state->crtc_y != plane_state->crtc_y);
1656 WARN_ON_ONCE(plane->state->src_x != plane_state->src_x);
1657 WARN_ON_ONCE(plane->state->src_y != plane_state->src_y);
1658 }
1659}
1660EXPORT_SYMBOL(drm_atomic_helper_async_commit);
1661
1662/**
1663 * drm_atomic_helper_commit - commit validated state object
1664 * @dev: DRM device
1665 * @state: the driver state object
1666 * @nonblock: whether nonblocking behavior is requested.
1667 *
1668 * This function commits a with drm_atomic_helper_check() pre-validated state
1669 * object. This can still fail when e.g. the framebuffer reservation fails. This
1670 * function implements nonblocking commits, using
1671 * drm_atomic_helper_setup_commit() and related functions.
1672 *
1673 * Committing the actual hardware state is done through the
1674 * &drm_mode_config_helper_funcs.atomic_commit_tail callback, or it's default
1675 * implementation drm_atomic_helper_commit_tail().
1676 *
1677 * RETURNS:
1678 * Zero for success or -errno.
1679 */
int drm_atomic_helper_commit(struct drm_device *dev,
			     struct drm_atomic_state *state,
			     bool nonblock)
{
	int ret;

	/* Fast path: in-place async update, no state swap or worker needed. */
	if (state->async_update) {
		ret = drm_atomic_helper_prepare_planes(dev, state);
		if (ret)
			return ret;

		drm_atomic_helper_async_commit(dev, state);
		drm_atomic_helper_cleanup_planes(dev, state);

		return 0;
	}

	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (ret)
		return ret;

	INIT_WORK(&state->commit_work, commit_work);

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	/*
	 * Blocking commits wait for fences here, before the point of no
	 * return; nonblocking commits wait in commit_tail() instead.
	 */
	if (!nonblock) {
		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
		if (ret)
			goto err;
	}

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	ret = drm_atomic_helper_swap_state(state, true);
	if (ret)
		goto err;

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 *
	 * NOTE: Commit work has multiple phases, first hardware commit, then
	 * cleanup. We want them to overlap, hence need system_unbound_wq to
	 * make sure work items don't artifically stall on each another.
	 */

	/* Reference is dropped by commit_tail() once the commit completes. */
	drm_atomic_state_get(state);
	if (nonblock)
		queue_work(system_unbound_wq, &state->commit_work);
	else
		commit_tail(state);

	return 0;

err:
	drm_atomic_helper_cleanup_planes(dev, state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_commit);
1756
1757/**
1758 * DOC: implementing nonblocking commit
1759 *
1760 * Nonblocking atomic commits have to be implemented in the following sequence:
1761 *
1762 * 1. Run drm_atomic_helper_prepare_planes() first. This is the only function
1763 * which commit needs to call which can fail, so we want to run it first and
1764 * synchronously.
1765 *
1766 * 2. Synchronize with any outstanding nonblocking commit worker threads which
 * might be affected by the new state update. This can be done by either cancelling
1768 * or flushing the work items, depending upon whether the driver can deal with
1769 * cancelled updates. Note that it is important to ensure that the framebuffer
1770 * cleanup is still done when cancelling.
1771 *
1772 * Asynchronous workers need to have sufficient parallelism to be able to run
1773 * different atomic commits on different CRTCs in parallel. The simplest way to
 * achieve this is by running them on the &system_unbound_wq work queue. Note
1775 * that drivers are not required to split up atomic commits and run an
1776 * individual commit in parallel - userspace is supposed to do that if it cares.
1777 * But it might be beneficial to do that for modesets, since those necessarily
1778 * must be done as one global operation, and enabling or disabling a CRTC can
1779 * take a long time. But even that is not required.
1780 *
1781 * 3. The software state is updated synchronously with
1782 * drm_atomic_helper_swap_state(). Doing this under the protection of all modeset
1783 * locks means concurrent callers never see inconsistent state. And doing this
1784 * while it's guaranteed that no relevant nonblocking worker runs means that
 * nonblocking workers do not need to grab any locks. Actually they must not grab
1786 * locks, for otherwise the work flushing will deadlock.
1787 *
1788 * 4. Schedule a work item to do all subsequent steps, using the split-out
1789 * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
1790 * then cleaning up the framebuffers after the old framebuffer is no longer
1791 * being displayed.
1792 *
1793 * The above scheme is implemented in the atomic helper libraries in
1794 * drm_atomic_helper_commit() using a bunch of helper functions. See
1795 * drm_atomic_helper_setup_commit() for a starting point.
1796 */
1797
/*
 * Check whether a new commit on @crtc must stall on earlier commits.
 *
 * The head of crtc->commit_list is the most recent commit. If its flip_done
 * hasn't signalled yet, a nonblocking commit would get ahead of it and must
 * bail with -EBUSY. Otherwise stall (interruptibly, with a timeout) on the
 * cleanup of the commit before that, so that at most two commits are in
 * flight per CRTC.
 */
static int stall_checks(struct drm_crtc *crtc, bool nonblock)
{
	struct drm_crtc_commit *commit, *stall_commit = NULL;
	bool completed = true;
	int i;
	long ret = 0;

	spin_lock(&crtc->commit_lock);
	i = 0;
	list_for_each_entry(commit, &crtc->commit_list, commit_entry) {
		if (i == 0) {
			completed = try_wait_for_completion(&commit->flip_done);
			/* Userspace is not allowed to get ahead of the previous
			 * commit with nonblocking ones. */
			if (!completed && nonblock) {
				spin_unlock(&crtc->commit_lock);
				return -EBUSY;
			}
		} else if (i == 1) {
			/* Grab a reference to the 2nd previous commit so we
			 * can wait on it below, outside the spinlock. */
			stall_commit = drm_crtc_commit_get(commit);
			break;
		}

		i++;
	}
	spin_unlock(&crtc->commit_lock);

	if (!stall_commit)
		return 0;

	/* We don't want to let commits get ahead of cleanup work too much,
	 * stalling on 2nd previous commit means triple-buffer won't ever stall.
	 */
	ret = wait_for_completion_interruptible_timeout(&stall_commit->cleanup_done,
							10*HZ);
	if (ret == 0)
		DRM_ERROR("[CRTC:%d:%s] cleanup_done timed out\n",
			  crtc->base.id, crtc->name);

	drm_crtc_commit_put(stall_commit);

	/* ret > 0 means the wait succeeded; only propagate real errors. */
	return ret < 0 ? ret : 0;
}
1841
1842static void release_crtc_commit(struct completion *completion)
1843{
1844 struct drm_crtc_commit *commit = container_of(completion,
1845 typeof(*commit),
1846 flip_done);
1847
1848 drm_crtc_commit_put(commit);
1849}
1850
1851static void init_commit(struct drm_crtc_commit *commit, struct drm_crtc *crtc)
1852{
1853 init_completion(&commit->flip_done);
1854 init_completion(&commit->hw_done);
1855 init_completion(&commit->cleanup_done);
1856 INIT_LIST_HEAD(&commit->commit_entry);
1857 kref_init(&commit->ref);
1858 commit->crtc = crtc;
1859}
1860
1861static struct drm_crtc_commit *
1862crtc_or_fake_commit(struct drm_atomic_state *state, struct drm_crtc *crtc)
1863{
1864 if (crtc) {
1865 struct drm_crtc_state *new_crtc_state;
1866
1867 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
1868
1869 return new_crtc_state->commit;
1870 }
1871
1872 if (!state->fake_commit) {
1873 state->fake_commit = kzalloc(sizeof(*state->fake_commit), GFP_KERNEL);
1874 if (!state->fake_commit)
1875 return NULL;
1876
1877 init_commit(state->fake_commit, NULL);
1878 }
1879
1880 return state->fake_commit;
1881}
1882
1883/**
1884 * drm_atomic_helper_setup_commit - setup possibly nonblocking commit
1885 * @state: new modeset state to be committed
1886 * @nonblock: whether nonblocking behavior is requested.
1887 *
1888 * This function prepares @state to be used by the atomic helper's support for
1889 * nonblocking commits. Drivers using the nonblocking commit infrastructure
1890 * should always call this function from their
1891 * &drm_mode_config_funcs.atomic_commit hook.
1892 *
1893 * To be able to use this support drivers need to use a few more helper
1894 * functions. drm_atomic_helper_wait_for_dependencies() must be called before
1895 * actually committing the hardware state, and for nonblocking commits this call
1896 * must be placed in the async worker. See also drm_atomic_helper_swap_state()
1897 * and it's stall parameter, for when a driver's commit hooks look at the
1898 * &drm_crtc.state, &drm_plane.state or &drm_connector.state pointer directly.
1899 *
1900 * Completion of the hardware commit step must be signalled using
1901 * drm_atomic_helper_commit_hw_done(). After this step the driver is not allowed
1902 * to read or change any permanent software or hardware modeset state. The only
1903 * exception is state protected by other means than &drm_modeset_lock locks.
1904 * Only the free standing @state with pointers to the old state structures can
1905 * be inspected, e.g. to clean up old buffers using
1906 * drm_atomic_helper_cleanup_planes().
1907 *
1908 * At the very end, before cleaning up @state drivers must call
1909 * drm_atomic_helper_commit_cleanup_done().
1910 *
1911 * This is all implemented by in drm_atomic_helper_commit(), giving drivers a
1912 * complete and easy-to-use default implementation of the atomic_commit() hook.
1913 *
1914 * The tracking of asynchronously executed and still pending commits is done
1915 * using the core structure &drm_crtc_commit.
1916 *
1917 * By default there's no need to clean up resources allocated by this function
1918 * explicitly: drm_atomic_state_default_clear() will take care of that
1919 * automatically.
1920 *
1921 * Returns:
1922 *
1923 * 0 on success. -EBUSY when userspace schedules nonblocking commits too fast,
1924 * -ENOMEM on allocation failures and -EINTR when a signal is pending.
1925 */
1926int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
1927 bool nonblock)
1928{
1929 struct drm_crtc *crtc;
1930 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1931 struct drm_connector *conn;
1932 struct drm_connector_state *old_conn_state, *new_conn_state;
1933 struct drm_plane *plane;
1934 struct drm_plane_state *old_plane_state, *new_plane_state;
1935 struct drm_crtc_commit *commit;
1936 int i, ret;
1937
1938 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
1939 commit = kzalloc(sizeof(*commit), GFP_KERNEL);
1940 if (!commit)
1941 return -ENOMEM;
1942
1943 init_commit(commit, crtc);
1944
1945 new_crtc_state->commit = commit;
1946
1947 ret = stall_checks(crtc, nonblock);
1948 if (ret)
1949 return ret;
1950
1951 /* Drivers only send out events when at least either current or
1952 * new CRTC state is active. Complete right away if everything
1953 * stays off. */
1954 if (!old_crtc_state->active && !new_crtc_state->active) {
1955 complete_all(&commit->flip_done);
1956 continue;
1957 }
1958
1959 /* Legacy cursor updates are fully unsynced. */
1960 if (state->legacy_cursor_update) {
1961 complete_all(&commit->flip_done);
1962 continue;
1963 }
1964
1965 if (!new_crtc_state->event) {
1966 commit->event = kzalloc(sizeof(*commit->event),
1967 GFP_KERNEL);
1968 if (!commit->event)
1969 return -ENOMEM;
1970
1971 new_crtc_state->event = commit->event;
1972 }
1973
1974 new_crtc_state->event->base.completion = &commit->flip_done;
1975 new_crtc_state->event->base.completion_release = release_crtc_commit;
1976 drm_crtc_commit_get(commit);
1977
1978 commit->abort_completion = true;
1979
1980 state->crtcs[i].commit = commit;
1981 drm_crtc_commit_get(commit);
1982 }
1983
1984 for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) {
1985 /* Userspace is not allowed to get ahead of the previous
1986 * commit with nonblocking ones. */
1987 if (nonblock && old_conn_state->commit &&
1988 !try_wait_for_completion(&old_conn_state->commit->flip_done))
1989 return -EBUSY;
1990
1991 /* Always track connectors explicitly for e.g. link retraining. */
1992 commit = crtc_or_fake_commit(state, new_conn_state->crtc ?: old_conn_state->crtc);
1993 if (!commit)
1994 return -ENOMEM;
1995
1996 new_conn_state->commit = drm_crtc_commit_get(commit);
1997 }
1998
1999 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
2000 /* Userspace is not allowed to get ahead of the previous
2001 * commit with nonblocking ones. */
2002 if (nonblock && old_plane_state->commit &&
2003 !try_wait_for_completion(&old_plane_state->commit->flip_done))
2004 return -EBUSY;
2005
2006 /* Always track planes explicitly for async pageflip support. */
2007 commit = crtc_or_fake_commit(state, new_plane_state->crtc ?: old_plane_state->crtc);
2008 if (!commit)
2009 return -ENOMEM;
2010
2011 new_plane_state->commit = drm_crtc_commit_get(commit);
2012 }
2013
2014 return 0;
2015}
2016EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
2017
2018/**
2019 * drm_atomic_helper_wait_for_dependencies - wait for required preceeding commits
2020 * @old_state: atomic state object with old state structures
2021 *
2022 * This function waits for all preceeding commits that touch the same CRTC as
2023 * @old_state to both be committed to the hardware (as signalled by
2024 * drm_atomic_helper_commit_hw_done) and executed by the hardware (as signalled
2025 * by calling drm_crtc_send_vblank_event() on the &drm_crtc_state.event).
2026 *
2027 * This is part of the atomic helper support for nonblocking commits, see
2028 * drm_atomic_helper_setup_commit() for an overview.
2029 */
2030void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
2031{
2032 struct drm_crtc *crtc;
2033 struct drm_crtc_state *old_crtc_state;
2034 struct drm_plane *plane;
2035 struct drm_plane_state *old_plane_state;
2036 struct drm_connector *conn;
2037 struct drm_connector_state *old_conn_state;
2038 struct drm_crtc_commit *commit;
2039 int i;
2040 long ret;
2041
2042 for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
2043 commit = old_crtc_state->commit;
2044
2045 if (!commit)
2046 continue;
2047
2048 ret = wait_for_completion_timeout(&commit->hw_done,
2049 10*HZ);
2050 if (ret == 0)
2051 DRM_ERROR("[CRTC:%d:%s] hw_done timed out\n",
2052 crtc->base.id, crtc->name);
2053
2054 /* Currently no support for overwriting flips, hence
2055 * stall for previous one to execute completely. */
2056 ret = wait_for_completion_timeout(&commit->flip_done,
2057 10*HZ);
2058 if (ret == 0)
2059 DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
2060 crtc->base.id, crtc->name);
2061 }
2062
2063 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
2064 commit = old_conn_state->commit;
2065
2066 if (!commit)
2067 continue;
2068
2069 ret = wait_for_completion_timeout(&commit->hw_done,
2070 10*HZ);
2071 if (ret == 0)
2072 DRM_ERROR("[CONNECTOR:%d:%s] hw_done timed out\n",
2073 conn->base.id, conn->name);
2074
2075 /* Currently no support for overwriting flips, hence
2076 * stall for previous one to execute completely. */
2077 ret = wait_for_completion_timeout(&commit->flip_done,
2078 10*HZ);
2079 if (ret == 0)
2080 DRM_ERROR("[CONNECTOR:%d:%s] flip_done timed out\n",
2081 conn->base.id, conn->name);
2082 }
2083
2084 for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
2085 commit = old_plane_state->commit;
2086
2087 if (!commit)
2088 continue;
2089
2090 ret = wait_for_completion_timeout(&commit->hw_done,
2091 10*HZ);
2092 if (ret == 0)
2093 DRM_ERROR("[PLANE:%d:%s] hw_done timed out\n",
2094 plane->base.id, plane->name);
2095
2096 /* Currently no support for overwriting flips, hence
2097 * stall for previous one to execute completely. */
2098 ret = wait_for_completion_timeout(&commit->flip_done,
2099 10*HZ);
2100 if (ret == 0)
2101 DRM_ERROR("[PLANE:%d:%s] flip_done timed out\n",
2102 plane->base.id, plane->name);
2103 }
2104}
2105EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
2106
2107/**
2108 * drm_atomic_helper_fake_vblank - fake VBLANK events if needed
2109 * @old_state: atomic state object with old state structures
2110 *
2111 * This function walks all CRTCs and fake VBLANK events on those with
2112 * &drm_crtc_state.no_vblank set to true and &drm_crtc_state.event != NULL.
2113 * The primary use of this function is writeback connectors working in oneshot
2114 * mode and faking VBLANK events. In this case they only fake the VBLANK event
2115 * when a job is queued, and any change to the pipeline that does not touch the
2116 * connector is leading to timeouts when calling
2117 * drm_atomic_helper_wait_for_vblanks() or
2118 * drm_atomic_helper_wait_for_flip_done().
2119 *
2120 * This is part of the atomic helper support for nonblocking commits, see
2121 * drm_atomic_helper_setup_commit() for an overview.
2122 */
2123void drm_atomic_helper_fake_vblank(struct drm_atomic_state *old_state)
2124{
2125 struct drm_crtc_state *new_crtc_state;
2126 struct drm_crtc *crtc;
2127 int i;
2128
2129 for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
2130 unsigned long flags;
2131
2132 if (!new_crtc_state->no_vblank)
2133 continue;
2134
2135 spin_lock_irqsave(&old_state->dev->event_lock, flags);
2136 if (new_crtc_state->event) {
2137 drm_crtc_send_vblank_event(crtc,
2138 new_crtc_state->event);
2139 new_crtc_state->event = NULL;
2140 }
2141 spin_unlock_irqrestore(&old_state->dev->event_lock, flags);
2142 }
2143}
2144EXPORT_SYMBOL(drm_atomic_helper_fake_vblank);
2145
2146/**
2147 * drm_atomic_helper_commit_hw_done - setup possible nonblocking commit
2148 * @old_state: atomic state object with old state structures
2149 *
2150 * This function is used to signal completion of the hardware commit step. After
2151 * this step the driver is not allowed to read or change any permanent software
2152 * or hardware modeset state. The only exception is state protected by other
2153 * means than &drm_modeset_lock locks.
2154 *
2155 * Drivers should try to postpone any expensive or delayed cleanup work after
2156 * this function is called.
2157 *
2158 * This is part of the atomic helper support for nonblocking commits, see
2159 * drm_atomic_helper_setup_commit() for an overview.
2160 */
2161void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *old_state)
2162{
2163 struct drm_crtc *crtc;
2164 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2165 struct drm_crtc_commit *commit;
2166 int i;
2167
2168 for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
2169 commit = new_crtc_state->commit;
2170 if (!commit)
2171 continue;
2172
2173 /*
2174 * copy new_crtc_state->commit to old_crtc_state->commit,
2175 * it's unsafe to touch new_crtc_state after hw_done,
2176 * but we still need to do so in cleanup_done().
2177 */
2178 if (old_crtc_state->commit)
2179 drm_crtc_commit_put(old_crtc_state->commit);
2180
2181 old_crtc_state->commit = drm_crtc_commit_get(commit);
2182
2183 /* backend must have consumed any event by now */
2184 WARN_ON(new_crtc_state->event);
2185 complete_all(&commit->hw_done);
2186 }
2187
2188 if (old_state->fake_commit) {
2189 complete_all(&old_state->fake_commit->hw_done);
2190 complete_all(&old_state->fake_commit->flip_done);
2191 }
2192}
2193EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);
2194
2195/**
2196 * drm_atomic_helper_commit_cleanup_done - signal completion of commit
2197 * @old_state: atomic state object with old state structures
2198 *
2199 * This signals completion of the atomic update @old_state, including any
2200 * cleanup work. If used, it must be called right before calling
2201 * drm_atomic_state_put().
2202 *
2203 * This is part of the atomic helper support for nonblocking commits, see
2204 * drm_atomic_helper_setup_commit() for an overview.
2205 */
2206void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state)
2207{
2208 struct drm_crtc *crtc;
2209 struct drm_crtc_state *old_crtc_state;
2210 struct drm_crtc_commit *commit;
2211 int i;
2212
2213 for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
2214 commit = old_crtc_state->commit;
2215 if (WARN_ON(!commit))
2216 continue;
2217
2218 complete_all(&commit->cleanup_done);
2219 WARN_ON(!try_wait_for_completion(&commit->hw_done));
2220
2221 spin_lock(&crtc->commit_lock);
2222 list_del(&commit->commit_entry);
2223 spin_unlock(&crtc->commit_lock);
2224 }
2225
2226 if (old_state->fake_commit) {
2227 complete_all(&old_state->fake_commit->cleanup_done);
2228 WARN_ON(!try_wait_for_completion(&old_state->fake_commit->hw_done));
2229 }
2230}
2231EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done);
2232
2233/**
2234 * drm_atomic_helper_prepare_planes - prepare plane resources before commit
2235 * @dev: DRM device
2236 * @state: atomic state object with new state structures
2237 *
2238 * This function prepares plane state, specifically framebuffers, for the new
2239 * configuration, by calling &drm_plane_helper_funcs.prepare_fb. If any failure
2240 * is encountered this function will call &drm_plane_helper_funcs.cleanup_fb on
2241 * any already successfully prepared framebuffer.
2242 *
2243 * Returns:
2244 * 0 on success, negative error code on failure.
2245 */
2246int drm_atomic_helper_prepare_planes(struct drm_device *dev,
2247 struct drm_atomic_state *state)
2248{
2249 struct drm_plane *plane;
2250 struct drm_plane_state *new_plane_state;
2251 int ret, i, j;
2252
2253 for_each_new_plane_in_state(state, plane, new_plane_state, i) {
2254 const struct drm_plane_helper_funcs *funcs;
2255
2256 funcs = plane->helper_private;
2257
2258 if (funcs->prepare_fb) {
2259 ret = funcs->prepare_fb(plane, new_plane_state);
2260 if (ret)
2261 goto fail;
2262 }
2263 }
2264
2265 return 0;
2266
2267fail:
2268 for_each_new_plane_in_state(state, plane, new_plane_state, j) {
2269 const struct drm_plane_helper_funcs *funcs;
2270
2271 if (j >= i)
2272 continue;
2273
2274 funcs = plane->helper_private;
2275
2276 if (funcs->cleanup_fb)
2277 funcs->cleanup_fb(plane, new_plane_state);
2278 }
2279
2280 return ret;
2281}
2282EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
2283
2284static bool plane_crtc_active(const struct drm_plane_state *state)
2285{
2286 return state->crtc && state->crtc->state->active;
2287}
2288
2289/**
2290 * drm_atomic_helper_commit_planes - commit plane state
2291 * @dev: DRM device
2292 * @old_state: atomic state object with old state structures
2293 * @flags: flags for committing plane state
2294 *
2295 * This function commits the new plane state using the plane and atomic helper
2296 * functions for planes and crtcs. It assumes that the atomic state has already
2297 * been pushed into the relevant object state pointers, since this step can no
2298 * longer fail.
2299 *
2300 * It still requires the global state object @old_state to know which planes and
2301 * crtcs need to be updated though.
2302 *
2303 * Note that this function does all plane updates across all CRTCs in one step.
2304 * If the hardware can't support this approach look at
2305 * drm_atomic_helper_commit_planes_on_crtc() instead.
2306 *
2307 * Plane parameters can be updated by applications while the associated CRTC is
2308 * disabled. The DRM/KMS core will store the parameters in the plane state,
2309 * which will be available to the driver when the CRTC is turned on. As a result
2310 * most drivers don't need to be immediately notified of plane updates for a
2311 * disabled CRTC.
2312 *
2313 * Unless otherwise needed, drivers are advised to set the ACTIVE_ONLY flag in
2314 * @flags in order not to receive plane update notifications related to a
2315 * disabled CRTC. This avoids the need to manually ignore plane updates in
2316 * driver code when the driver and/or hardware can't or just don't need to deal
2317 * with updates on disabled CRTCs, for example when supporting runtime PM.
2318 *
2319 * Drivers may set the NO_DISABLE_AFTER_MODESET flag in @flags if the relevant
2320 * display controllers require to disable a CRTC's planes when the CRTC is
2321 * disabled. This function would skip the &drm_plane_helper_funcs.atomic_disable
2322 * call for a plane if the CRTC of the old plane state needs a modesetting
2323 * operation. Of course, the drivers need to disable the planes in their CRTC
2324 * disable callbacks since no one else would do that.
2325 *
2326 * The drm_atomic_helper_commit() default implementation doesn't set the
2327 * ACTIVE_ONLY flag to most closely match the behaviour of the legacy helpers.
2328 * This should not be copied blindly by drivers.
2329 */
2330void drm_atomic_helper_commit_planes(struct drm_device *dev,
2331 struct drm_atomic_state *old_state,
2332 uint32_t flags)
2333{
2334 struct drm_crtc *crtc;
2335 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2336 struct drm_plane *plane;
2337 struct drm_plane_state *old_plane_state, *new_plane_state;
2338 int i;
2339 bool active_only = flags & DRM_PLANE_COMMIT_ACTIVE_ONLY;
2340 bool no_disable = flags & DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET;
2341
2342 for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
2343 const struct drm_crtc_helper_funcs *funcs;
2344
2345 funcs = crtc->helper_private;
2346
2347 if (!funcs || !funcs->atomic_begin)
2348 continue;
2349
2350 if (active_only && !new_crtc_state->active)
2351 continue;
2352
2353 funcs->atomic_begin(crtc, old_crtc_state);
2354 }
2355
2356 for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
2357 const struct drm_plane_helper_funcs *funcs;
2358 bool disabling;
2359
2360 funcs = plane->helper_private;
2361
2362 if (!funcs)
2363 continue;
2364
2365 disabling = drm_atomic_plane_disabling(old_plane_state,
2366 new_plane_state);
2367
2368 if (active_only) {
2369 /*
2370 * Skip planes related to inactive CRTCs. If the plane
2371 * is enabled use the state of the current CRTC. If the
2372 * plane is being disabled use the state of the old
2373 * CRTC to avoid skipping planes being disabled on an
2374 * active CRTC.
2375 */
2376 if (!disabling && !plane_crtc_active(new_plane_state))
2377 continue;
2378 if (disabling && !plane_crtc_active(old_plane_state))
2379 continue;
2380 }
2381
2382 /*
2383 * Special-case disabling the plane if drivers support it.
2384 */
2385 if (disabling && funcs->atomic_disable) {
2386 struct drm_crtc_state *crtc_state;
2387
2388 crtc_state = old_plane_state->crtc->state;
2389
2390 if (drm_atomic_crtc_needs_modeset(crtc_state) &&
2391 no_disable)
2392 continue;
2393
2394 funcs->atomic_disable(plane, old_plane_state);
2395 } else if (new_plane_state->crtc || disabling) {
2396 funcs->atomic_update(plane, old_plane_state);
2397 }
2398 }
2399
2400 for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
2401 const struct drm_crtc_helper_funcs *funcs;
2402
2403 funcs = crtc->helper_private;
2404
2405 if (!funcs || !funcs->atomic_flush)
2406 continue;
2407
2408 if (active_only && !new_crtc_state->active)
2409 continue;
2410
2411 funcs->atomic_flush(crtc, old_crtc_state);
2412 }
2413}
2414EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
2415
2416/**
2417 * drm_atomic_helper_commit_planes_on_crtc - commit plane state for a crtc
2418 * @old_crtc_state: atomic state object with the old crtc state
2419 *
2420 * This function commits the new plane state using the plane and atomic helper
2421 * functions for planes on the specific crtc. It assumes that the atomic state
2422 * has already been pushed into the relevant object state pointers, since this
2423 * step can no longer fail.
2424 *
2425 * This function is useful when plane updates should be done crtc-by-crtc
2426 * instead of one global step like drm_atomic_helper_commit_planes() does.
2427 *
2428 * This function can only be savely used when planes are not allowed to move
2429 * between different CRTCs because this function doesn't handle inter-CRTC
2430 * depencies. Callers need to ensure that either no such depencies exist,
2431 * resolve them through ordering of commit calls or through some other means.
2432 */
2433void
2434drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
2435{
2436 const struct drm_crtc_helper_funcs *crtc_funcs;
2437 struct drm_crtc *crtc = old_crtc_state->crtc;
2438 struct drm_atomic_state *old_state = old_crtc_state->state;
2439 struct drm_crtc_state *new_crtc_state =
2440 drm_atomic_get_new_crtc_state(old_state, crtc);
2441 struct drm_plane *plane;
2442 unsigned plane_mask;
2443
2444 plane_mask = old_crtc_state->plane_mask;
2445 plane_mask |= new_crtc_state->plane_mask;
2446
2447 crtc_funcs = crtc->helper_private;
2448 if (crtc_funcs && crtc_funcs->atomic_begin)
2449 crtc_funcs->atomic_begin(crtc, old_crtc_state);
2450
2451 drm_for_each_plane_mask(plane, crtc->dev, plane_mask) {
2452 struct drm_plane_state *old_plane_state =
2453 drm_atomic_get_old_plane_state(old_state, plane);
2454 struct drm_plane_state *new_plane_state =
2455 drm_atomic_get_new_plane_state(old_state, plane);
2456 const struct drm_plane_helper_funcs *plane_funcs;
2457
2458 plane_funcs = plane->helper_private;
2459
2460 if (!old_plane_state || !plane_funcs)
2461 continue;
2462
2463 WARN_ON(new_plane_state->crtc &&
2464 new_plane_state->crtc != crtc);
2465
2466 if (drm_atomic_plane_disabling(old_plane_state, new_plane_state) &&
2467 plane_funcs->atomic_disable)
2468 plane_funcs->atomic_disable(plane, old_plane_state);
2469 else if (new_plane_state->crtc ||
2470 drm_atomic_plane_disabling(old_plane_state, new_plane_state))
2471 plane_funcs->atomic_update(plane, old_plane_state);
2472 }
2473
2474 if (crtc_funcs && crtc_funcs->atomic_flush)
2475 crtc_funcs->atomic_flush(crtc, old_crtc_state);
2476}
2477EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc);
2478
2479/**
2480 * drm_atomic_helper_disable_planes_on_crtc - helper to disable CRTC's planes
2481 * @old_crtc_state: atomic state object with the old CRTC state
2482 * @atomic: if set, synchronize with CRTC's atomic_begin/flush hooks
2483 *
2484 * Disables all planes associated with the given CRTC. This can be
2485 * used for instance in the CRTC helper atomic_disable callback to disable
2486 * all planes.
2487 *
2488 * If the atomic-parameter is set the function calls the CRTC's
2489 * atomic_begin hook before and atomic_flush hook after disabling the
2490 * planes.
2491 *
2492 * It is a bug to call this function without having implemented the
2493 * &drm_plane_helper_funcs.atomic_disable plane hook.
2494 */
2495void
2496drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state,
2497 bool atomic)
2498{
2499 struct drm_crtc *crtc = old_crtc_state->crtc;
2500 const struct drm_crtc_helper_funcs *crtc_funcs =
2501 crtc->helper_private;
2502 struct drm_plane *plane;
2503
2504 if (atomic && crtc_funcs && crtc_funcs->atomic_begin)
2505 crtc_funcs->atomic_begin(crtc, NULL);
2506
2507 drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) {
2508 const struct drm_plane_helper_funcs *plane_funcs =
2509 plane->helper_private;
2510
2511 if (!plane_funcs)
2512 continue;
2513
2514 WARN_ON(!plane_funcs->atomic_disable);
2515 if (plane_funcs->atomic_disable)
2516 plane_funcs->atomic_disable(plane, NULL);
2517 }
2518
2519 if (atomic && crtc_funcs && crtc_funcs->atomic_flush)
2520 crtc_funcs->atomic_flush(crtc, NULL);
2521}
2522EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc);
2523
2524/**
2525 * drm_atomic_helper_cleanup_planes - cleanup plane resources after commit
2526 * @dev: DRM device
2527 * @old_state: atomic state object with old state structures
2528 *
2529 * This function cleans up plane state, specifically framebuffers, from the old
2530 * configuration. Hence the old configuration must be perserved in @old_state to
2531 * be able to call this function.
2532 *
2533 * This function must also be called on the new state when the atomic update
2534 * fails at any point after calling drm_atomic_helper_prepare_planes().
2535 */
2536void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
2537 struct drm_atomic_state *old_state)
2538{
2539 struct drm_plane *plane;
2540 struct drm_plane_state *old_plane_state, *new_plane_state;
2541 int i;
2542
2543 for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
2544 const struct drm_plane_helper_funcs *funcs;
2545 struct drm_plane_state *plane_state;
2546
2547 /*
2548 * This might be called before swapping when commit is aborted,
2549 * in which case we have to cleanup the new state.
2550 */
2551 if (old_plane_state == plane->state)
2552 plane_state = new_plane_state;
2553 else
2554 plane_state = old_plane_state;
2555
2556 funcs = plane->helper_private;
2557
2558 if (funcs->cleanup_fb)
2559 funcs->cleanup_fb(plane, plane_state);
2560 }
2561}
2562EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
2563
2564/**
2565 * drm_atomic_helper_swap_state - store atomic state into current sw state
2566 * @state: atomic state
2567 * @stall: stall for preceeding commits
2568 *
2569 * This function stores the atomic state into the current state pointers in all
2570 * driver objects. It should be called after all failing steps have been done
2571 * and succeeded, but before the actual hardware state is committed.
2572 *
2573 * For cleanup and error recovery the current state for all changed objects will
2574 * be swapped into @state.
2575 *
2576 * With that sequence it fits perfectly into the plane prepare/cleanup sequence:
2577 *
2578 * 1. Call drm_atomic_helper_prepare_planes() with the staged atomic state.
2579 *
2580 * 2. Do any other steps that might fail.
2581 *
2582 * 3. Put the staged state into the current state pointers with this function.
2583 *
2584 * 4. Actually commit the hardware state.
2585 *
2586 * 5. Call drm_atomic_helper_cleanup_planes() with @state, which since step 3
2587 * contains the old state. Also do any other cleanup required with that state.
2588 *
2589 * @stall must be set when nonblocking commits for this driver directly access
2590 * the &drm_plane.state, &drm_crtc.state or &drm_connector.state pointer. With
2591 * the current atomic helpers this is almost always the case, since the helpers
2592 * don't pass the right state structures to the callbacks.
2593 *
2594 * Returns:
2595 *
2596 * Returns 0 on success. Can return -ERESTARTSYS when @stall is true and the
2597 * waiting for the previous commits has been interrupted.
2598 */
2599int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
2600 bool stall)
2601{
2602 int i, ret;
2603 struct drm_connector *connector;
2604 struct drm_connector_state *old_conn_state, *new_conn_state;
2605 struct drm_crtc *crtc;
2606 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2607 struct drm_plane *plane;
2608 struct drm_plane_state *old_plane_state, *new_plane_state;
2609 struct drm_crtc_commit *commit;
2610 struct drm_private_obj *obj;
2611 struct drm_private_state *old_obj_state, *new_obj_state;
2612
2613 if (stall) {
2614 /*
2615 * We have to stall for hw_done here before
2616 * drm_atomic_helper_wait_for_dependencies() because flip
2617 * depth > 1 is not yet supported by all drivers. As long as
2618 * obj->state is directly dereferenced anywhere in the drivers
2619 * atomic_commit_tail function, then it's unsafe to swap state
2620 * before drm_atomic_helper_commit_hw_done() is called.
2621 */
2622
2623 for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
2624 commit = old_crtc_state->commit;
2625
2626 if (!commit)
2627 continue;
2628
2629 ret = wait_for_completion_interruptible(&commit->hw_done);
2630 if (ret)
2631 return ret;
2632 }
2633
2634 for_each_old_connector_in_state(state, connector, old_conn_state, i) {
2635 commit = old_conn_state->commit;
2636
2637 if (!commit)
2638 continue;
2639
2640 ret = wait_for_completion_interruptible(&commit->hw_done);
2641 if (ret)
2642 return ret;
2643 }
2644
2645 for_each_old_plane_in_state(state, plane, old_plane_state, i) {
2646 commit = old_plane_state->commit;
2647
2648 if (!commit)
2649 continue;
2650
2651 ret = wait_for_completion_interruptible(&commit->hw_done);
2652 if (ret)
2653 return ret;
2654 }
2655 }
2656
2657 for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
2658 WARN_ON(connector->state != old_conn_state);
2659
2660 old_conn_state->state = state;
2661 new_conn_state->state = NULL;
2662
2663 state->connectors[i].state = old_conn_state;
2664 connector->state = new_conn_state;
2665 }
2666
2667 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
2668 WARN_ON(crtc->state != old_crtc_state);
2669
2670 old_crtc_state->state = state;
2671 new_crtc_state->state = NULL;
2672
2673 state->crtcs[i].state = old_crtc_state;
2674 crtc->state = new_crtc_state;
2675
2676 if (new_crtc_state->commit) {
2677 spin_lock(&crtc->commit_lock);
2678 list_add(&new_crtc_state->commit->commit_entry,
2679 &crtc->commit_list);
2680 spin_unlock(&crtc->commit_lock);
2681
2682 new_crtc_state->commit->event = NULL;
2683 }
2684 }
2685
2686 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
2687 WARN_ON(plane->state != old_plane_state);
2688
2689 old_plane_state->state = state;
2690 new_plane_state->state = NULL;
2691
2692 state->planes[i].state = old_plane_state;
2693 plane->state = new_plane_state;
2694 }
2695
2696 for_each_oldnew_private_obj_in_state(state, obj, old_obj_state, new_obj_state, i) {
2697 WARN_ON(obj->state != old_obj_state);
2698
2699 old_obj_state->state = state;
2700 new_obj_state->state = NULL;
2701
2702 state->private_objs[i].state = old_obj_state;
2703 obj->state = new_obj_state;
2704 }
2705
2706 return 0;
2707}
2708EXPORT_SYMBOL(drm_atomic_helper_swap_state);
2709
2710/**
2711 * drm_atomic_helper_update_plane - Helper for primary plane update using atomic
2712 * @plane: plane object to update
2713 * @crtc: owning CRTC of owning plane
2714 * @fb: framebuffer to flip onto plane
2715 * @crtc_x: x offset of primary plane on crtc
2716 * @crtc_y: y offset of primary plane on crtc
2717 * @crtc_w: width of primary plane rectangle on crtc
2718 * @crtc_h: height of primary plane rectangle on crtc
2719 * @src_x: x offset of @fb for panning
2720 * @src_y: y offset of @fb for panning
2721 * @src_w: width of source rectangle in @fb
2722 * @src_h: height of source rectangle in @fb
2723 * @ctx: lock acquire context
2724 *
2725 * Provides a default plane update handler using the atomic driver interface.
2726 *
2727 * RETURNS:
2728 * Zero on success, error code on failure
2729 */
2730int drm_atomic_helper_update_plane(struct drm_plane *plane,
2731 struct drm_crtc *crtc,
2732 struct drm_framebuffer *fb,
2733 int crtc_x, int crtc_y,
2734 unsigned int crtc_w, unsigned int crtc_h,
2735 uint32_t src_x, uint32_t src_y,
2736 uint32_t src_w, uint32_t src_h,
2737 struct drm_modeset_acquire_ctx *ctx)
2738{
2739 struct drm_atomic_state *state;
2740 struct drm_plane_state *plane_state;
2741 int ret = 0;
2742
2743 state = drm_atomic_state_alloc(plane->dev);
2744 if (!state)
2745 return -ENOMEM;
2746
2747 state->acquire_ctx = ctx;
2748 plane_state = drm_atomic_get_plane_state(state, plane);
2749 if (IS_ERR(plane_state)) {
2750 ret = PTR_ERR(plane_state);
2751 goto fail;
2752 }
2753
2754 ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
2755 if (ret != 0)
2756 goto fail;
2757 drm_atomic_set_fb_for_plane(plane_state, fb);
2758 plane_state->crtc_x = crtc_x;
2759 plane_state->crtc_y = crtc_y;
2760 plane_state->crtc_w = crtc_w;
2761 plane_state->crtc_h = crtc_h;
2762 plane_state->src_x = src_x;
2763 plane_state->src_y = src_y;
2764 plane_state->src_w = src_w;
2765 plane_state->src_h = src_h;
2766
2767 if (plane == crtc->cursor)
2768 state->legacy_cursor_update = true;
2769
2770 ret = drm_atomic_commit(state);
2771fail:
2772 drm_atomic_state_put(state);
2773 return ret;
2774}
2775EXPORT_SYMBOL(drm_atomic_helper_update_plane);
2776
2777/**
2778 * drm_atomic_helper_disable_plane - Helper for primary plane disable using * atomic
2779 * @plane: plane to disable
2780 * @ctx: lock acquire context
2781 *
2782 * Provides a default plane disable handler using the atomic driver interface.
2783 *
2784 * RETURNS:
2785 * Zero on success, error code on failure
2786 */
int drm_atomic_helper_disable_plane(struct drm_plane *plane,
				    struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct drm_plane_state *plane_state;
	int ret = 0;

	state = drm_atomic_state_alloc(plane->dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;
	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state)) {
		ret = PTR_ERR(plane_state);
		goto fail;
	}

	/* Disabling the cursor plane through the legacy path is flagged so
	 * the commit machinery can special-case it. */
	if (plane_state->crtc && plane_state->crtc->cursor == plane)
		plane_state->state->legacy_cursor_update = true;

	/* Detaches the plane from its CRTC and clears fb + rectangles. */
	ret = __drm_atomic_helper_disable_plane(plane, plane_state);
	if (ret != 0)
		goto fail;

	ret = drm_atomic_commit(state);
fail:
	drm_atomic_state_put(state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_disable_plane);
2818
2819/* just used from fb-helper and atomic-helper: */
2820int __drm_atomic_helper_disable_plane(struct drm_plane *plane,
2821 struct drm_plane_state *plane_state)
2822{
2823 int ret;
2824
2825 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
2826 if (ret != 0)
2827 return ret;
2828
2829 drm_atomic_set_fb_for_plane(plane_state, NULL);
2830 plane_state->crtc_x = 0;
2831 plane_state->crtc_y = 0;
2832 plane_state->crtc_w = 0;
2833 plane_state->crtc_h = 0;
2834 plane_state->src_x = 0;
2835 plane_state->src_y = 0;
2836 plane_state->src_w = 0;
2837 plane_state->src_h = 0;
2838
2839 return 0;
2840}
2841
/*
 * Adjust connector->CRTC routing and CRTC enable state for a legacy
 * set_config request on set->crtc: first detach every connector currently
 * routed to that CRTC, then attach exactly the connectors listed in
 * set->connectors, and finally turn off any *other* CRTC left without
 * connectors.
 */
static int update_output_state(struct drm_atomic_state *state,
			       struct drm_mode_set *set)
{
	struct drm_device *dev = set->crtc->dev;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int ret, i;

	/* Connector routing changes require the connection_mutex. */
	ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
			       state->acquire_ctx);
	if (ret)
		return ret;

	/* First disable all connectors on the target crtc. */
	ret = drm_atomic_add_affected_connectors(state, set->crtc);
	if (ret)
		return ret;

	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
		if (new_conn_state->crtc == set->crtc) {
			ret = drm_atomic_set_crtc_for_connector(new_conn_state,
								NULL);
			if (ret)
				return ret;

			/* Make sure legacy setCrtc always re-trains */
			new_conn_state->link_status = DRM_LINK_STATUS_GOOD;
		}
	}

	/* Then set all connectors from set->connectors on the target crtc */
	for (i = 0; i < set->num_connectors; i++) {
		new_conn_state = drm_atomic_get_connector_state(state,
								set->connectors[i]);
		if (IS_ERR(new_conn_state))
			return PTR_ERR(new_conn_state);

		ret = drm_atomic_set_crtc_for_connector(new_conn_state,
							set->crtc);
		if (ret)
			return ret;
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		/* Don't update ->enable for the CRTC in the set_config request,
		 * since a mismatch would indicate a bug in the upper layers.
		 * The actual modeset code later on will catch any
		 * inconsistencies here. */
		if (crtc == set->crtc)
			continue;

		/* CRTCs that lost all their connectors get shut down. */
		if (!new_crtc_state->connector_mask) {
			ret = drm_atomic_set_mode_prop_for_crtc(new_crtc_state,
								NULL);
			if (ret < 0)
				return ret;

			new_crtc_state->active = false;
		}
	}

	return 0;
}
2907
2908/**
2909 * drm_atomic_helper_set_config - set a new config from userspace
2910 * @set: mode set configuration
2911 * @ctx: lock acquisition context
2912 *
2913 * Provides a default crtc set_config handler using the atomic driver interface.
2914 *
2915 * NOTE: For backwards compatibility with old userspace this automatically
2916 * resets the "link-status" property to GOOD, to force any link
2917 * re-training. The SETCRTC ioctl does not define whether an update does
2918 * need a full modeset or just a plane update, hence we're allowed to do
2919 * that. See also drm_connector_set_link_status_property().
2920 *
2921 * Returns:
2922 * Returns 0 on success, negative errno numbers on failure.
2923 */
2924int drm_atomic_helper_set_config(struct drm_mode_set *set,
2925 struct drm_modeset_acquire_ctx *ctx)
2926{
2927 struct drm_atomic_state *state;
2928 struct drm_crtc *crtc = set->crtc;
2929 int ret = 0;
2930
2931 state = drm_atomic_state_alloc(crtc->dev);
2932 if (!state)
2933 return -ENOMEM;
2934
2935 state->acquire_ctx = ctx;
2936 ret = __drm_atomic_helper_set_config(set, state);
2937 if (ret != 0)
2938 goto fail;
2939
2940 ret = handle_conflicting_encoders(state, true);
2941 if (ret)
2942 return ret;
2943
2944 ret = drm_atomic_commit(state);
2945
2946fail:
2947 drm_atomic_state_put(state);
2948 return ret;
2949}
2950EXPORT_SYMBOL(drm_atomic_helper_set_config);
2951
/* just used from fb-helper and atomic-helper: */
int __drm_atomic_helper_set_config(struct drm_mode_set *set,
				   struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *primary_state;
	struct drm_crtc *crtc = set->crtc;
	int hdisplay, vdisplay;
	int ret;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	primary_state = drm_atomic_get_plane_state(state, crtc->primary);
	if (IS_ERR(primary_state))
		return PTR_ERR(primary_state);

	/* A NULL mode means "disable this CRTC": drop the mode, deactivate
	 * the CRTC and detach the primary plane. */
	if (!set->mode) {
		WARN_ON(set->fb);
		WARN_ON(set->num_connectors);

		ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
		if (ret != 0)
			return ret;

		crtc_state->active = false;

		ret = drm_atomic_set_crtc_for_plane(primary_state, NULL);
		if (ret != 0)
			return ret;

		drm_atomic_set_fb_for_plane(primary_state, NULL);

		goto commit;
	}

	/* Enable path: a mode implies a framebuffer and connectors. */
	WARN_ON(!set->fb);
	WARN_ON(!set->num_connectors);

	ret = drm_atomic_set_mode_for_crtc(crtc_state, set->mode);
	if (ret != 0)
		return ret;

	crtc_state->active = true;

	ret = drm_atomic_set_crtc_for_plane(primary_state, crtc);
	if (ret != 0)
		return ret;

	drm_mode_get_hv_timing(set->mode, &hdisplay, &vdisplay);

	/* The primary plane fills the whole active area; legacy pan offsets
	 * (set->x/y, integer pixels) are converted to 16.16 fixed point. */
	drm_atomic_set_fb_for_plane(primary_state, set->fb);
	primary_state->crtc_x = 0;
	primary_state->crtc_y = 0;
	primary_state->crtc_w = hdisplay;
	primary_state->crtc_h = vdisplay;
	primary_state->src_x = set->x << 16;
	primary_state->src_y = set->y << 16;
	/* For 90/270 degree rotations the source rectangle is transposed. */
	if (drm_rotation_90_or_270(primary_state->rotation)) {
		primary_state->src_w = vdisplay << 16;
		primary_state->src_h = hdisplay << 16;
	} else {
		primary_state->src_w = hdisplay << 16;
		primary_state->src_h = vdisplay << 16;
	}

commit:
	/* Update connector routing to match the request. */
	ret = update_output_state(state, set);
	if (ret)
		return ret;

	return 0;
}
3026
/*
 * Build and commit an atomic state that shuts everything down: every CRTC
 * is deactivated and loses its mode, every connector and plane is detached
 * from its CRTC and plane framebuffers are dropped.
 *
 * NOTE(review): @clean_old_fbs is not referenced anywhere in this body —
 * presumably kept for the two distinct call sites; confirm before relying
 * on any behavioral difference between them.
 */
static int __drm_atomic_helper_disable_all(struct drm_device *dev,
					   struct drm_modeset_acquire_ctx *ctx,
					   bool clean_old_fbs)
{
	struct drm_atomic_state *state;
	struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	struct drm_plane_state *plane_state;
	struct drm_plane *plane;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int ret, i;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;

	/* Pull every CRTC into the state and shut it down, dragging its
	 * planes and connectors into the state as well. */
	drm_for_each_crtc(crtc, dev) {
		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto free;
		}

		crtc_state->active = false;

		ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, NULL);
		if (ret < 0)
			goto free;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret < 0)
			goto free;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret < 0)
			goto free;
	}

	/* Detach all connectors pulled in above. */
	for_each_new_connector_in_state(state, conn, conn_state, i) {
		ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
		if (ret < 0)
			goto free;
	}

	/* Detach all planes and drop their framebuffers. */
	for_each_new_plane_in_state(state, plane, plane_state, i) {
		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
		if (ret < 0)
			goto free;

		drm_atomic_set_fb_for_plane(plane_state, NULL);
	}

	ret = drm_atomic_commit(state);
free:
	drm_atomic_state_put(state);
	return ret;
}
3087
3088/**
3089 * drm_atomic_helper_disable_all - disable all currently active outputs
3090 * @dev: DRM device
3091 * @ctx: lock acquisition context
3092 *
3093 * Loops through all connectors, finding those that aren't turned off and then
3094 * turns them off by setting their DPMS mode to OFF and deactivating the CRTC
3095 * that they are connected to.
3096 *
3097 * This is used for example in suspend/resume to disable all currently active
3098 * functions when suspending. If you just want to shut down everything at e.g.
3099 * driver unload, look at drm_atomic_helper_shutdown().
3100 *
3101 * Note that if callers haven't already acquired all modeset locks this might
3102 * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
3103 *
3104 * Returns:
3105 * 0 on success or a negative error code on failure.
3106 *
3107 * See also:
3108 * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and
3109 * drm_atomic_helper_shutdown().
3110 */
int drm_atomic_helper_disable_all(struct drm_device *dev,
				  struct drm_modeset_acquire_ctx *ctx)
{
	/* Thin wrapper; all the work happens in
	 * __drm_atomic_helper_disable_all(). */
	return __drm_atomic_helper_disable_all(dev, ctx, false);
}
EXPORT_SYMBOL(drm_atomic_helper_disable_all);
3117
3118/**
3119 * drm_atomic_helper_shutdown - shutdown all CRTC
3120 * @dev: DRM device
3121 *
3122 * This shuts down all CRTC, which is useful for driver unloading. Shutdown on
3123 * suspend should instead be handled with drm_atomic_helper_suspend(), since
3124 * that also takes a snapshot of the modeset state to be restored on resume.
3125 *
3126 * This is just a convenience wrapper around drm_atomic_helper_disable_all(),
3127 * and it is the atomic version of drm_crtc_force_disable_all().
3128 */
void drm_atomic_helper_shutdown(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	/* The BEGIN/END macro pair takes all modeset locks and transparently
	 * retries on deadlock, so the code in between may run repeatedly. */
	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);

	ret = __drm_atomic_helper_disable_all(dev, &ctx, true);
	if (ret)
		DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret);

	DRM_MODESET_LOCK_ALL_END(ctx, ret);
}
EXPORT_SYMBOL(drm_atomic_helper_shutdown);
3143
3144/**
3145 * drm_atomic_helper_duplicate_state - duplicate an atomic state object
3146 * @dev: DRM device
3147 * @ctx: lock acquisition context
3148 *
3149 * Makes a copy of the current atomic state by looping over all objects and
3150 * duplicating their respective states. This is used for example by suspend/
3151 * resume support code to save the state prior to suspend such that it can
3152 * be restored upon resume.
3153 *
3154 * Note that this treats atomic state as persistent between save and restore.
3155 * Drivers must make sure that this is possible and won't result in confusion
3156 * or erroneous behaviour.
3157 *
3158 * Note that if callers haven't already acquired all modeset locks this might
3159 * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
3160 *
3161 * Returns:
3162 * A pointer to the copy of the atomic state object on success or an
3163 * ERR_PTR()-encoded error code on failure.
3164 *
3165 * See also:
3166 * drm_atomic_helper_suspend(), drm_atomic_helper_resume()
3167 */
struct drm_atomic_state *
drm_atomic_helper_duplicate_state(struct drm_device *dev,
				  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct drm_connector *conn;
	struct drm_connector_list_iter conn_iter;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	int err = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return ERR_PTR(-ENOMEM);

	state->acquire_ctx = ctx;

	/* Pulling each object's state into @state duplicates it. */
	drm_for_each_crtc(crtc, dev) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			err = PTR_ERR(crtc_state);
			goto free;
		}
	}

	drm_for_each_plane(plane, dev) {
		struct drm_plane_state *plane_state;

		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state)) {
			err = PTR_ERR(plane_state);
			goto free;
		}
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		struct drm_connector_state *conn_state;

		conn_state = drm_atomic_get_connector_state(state, conn);
		if (IS_ERR(conn_state)) {
			err = PTR_ERR(conn_state);
			/* The iterator must be ended on every exit path. */
			drm_connector_list_iter_end(&conn_iter);
			goto free;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* clear the acquire context so that it isn't accidentally reused */
	state->acquire_ctx = NULL;

free:
	if (err < 0) {
		drm_atomic_state_put(state);
		state = ERR_PTR(err);
	}

	return state;
}
EXPORT_SYMBOL(drm_atomic_helper_duplicate_state);
3230
3231/**
3232 * drm_atomic_helper_suspend - subsystem-level suspend helper
3233 * @dev: DRM device
3234 *
3235 * Duplicates the current atomic state, disables all active outputs and then
3236 * returns a pointer to the original atomic state to the caller. Drivers can
3237 * pass this pointer to the drm_atomic_helper_resume() helper upon resume to
3238 * restore the output configuration that was active at the time the system
3239 * entered suspend.
3240 *
3241 * Note that it is potentially unsafe to use this. The atomic state object
3242 * returned by this function is assumed to be persistent. Drivers must ensure
3243 * that this holds true. Before calling this function, drivers must make sure
3244 * to suspend fbdev emulation so that nothing can be using the device.
3245 *
3246 * Returns:
3247 * A pointer to a copy of the state before suspend on success or an ERR_PTR()-
3248 * encoded error code on failure. Drivers should store the returned atomic
3249 * state object and pass it to the drm_atomic_helper_resume() helper upon
3250 * resume.
3251 *
3252 * See also:
3253 * drm_atomic_helper_duplicate_state(), drm_atomic_helper_disable_all(),
3254 * drm_atomic_helper_resume(), drm_atomic_helper_commit_duplicated_state()
3255 */
struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	int err;

	/* This can never be returned, but it makes the compiler happy */
	state = ERR_PTR(-EINVAL);

	/* Takes all modeset locks, retrying on deadlock via the macro pair;
	 * the code in between may therefore run more than once. */
	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);

	/* Snapshot the current state first, then disable everything. */
	state = drm_atomic_helper_duplicate_state(dev, &ctx);
	if (IS_ERR(state))
		goto unlock;

	err = drm_atomic_helper_disable_all(dev, &ctx);
	if (err < 0) {
		drm_atomic_state_put(state);
		state = ERR_PTR(err);
		goto unlock;
	}

unlock:
	DRM_MODESET_LOCK_ALL_END(ctx, err);
	if (err)
		return ERR_PTR(err);

	return state;
}
EXPORT_SYMBOL(drm_atomic_helper_suspend);
3286
3287/**
3288 * drm_atomic_helper_commit_duplicated_state - commit duplicated state
3289 * @state: duplicated atomic state to commit
3290 * @ctx: pointer to acquire_ctx to use for commit.
3291 *
3292 * The state returned by drm_atomic_helper_duplicate_state() and
3293 * drm_atomic_helper_suspend() is partially invalid, and needs to
3294 * be fixed up before commit.
3295 *
3296 * Returns:
3297 * 0 on success or a negative error code on failure.
3298 *
3299 * See also:
3300 * drm_atomic_helper_suspend()
3301 */
3302int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
3303 struct drm_modeset_acquire_ctx *ctx)
3304{
3305 int i, ret;
3306 struct drm_plane *plane;
3307 struct drm_plane_state *new_plane_state;
3308 struct drm_connector *connector;
3309 struct drm_connector_state *new_conn_state;
3310 struct drm_crtc *crtc;
3311 struct drm_crtc_state *new_crtc_state;
3312
3313 state->acquire_ctx = ctx;
3314
3315 for_each_new_plane_in_state(state, plane, new_plane_state, i)
3316 state->planes[i].old_state = plane->state;
3317
3318 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
3319 state->crtcs[i].old_state = crtc->state;
3320
3321 for_each_new_connector_in_state(state, connector, new_conn_state, i)
3322 state->connectors[i].old_state = connector->state;
3323
3324 ret = drm_atomic_commit(state);
3325
3326 state->acquire_ctx = NULL;
3327
3328 return ret;
3329}
3330EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state);
3331
3332/**
3333 * drm_atomic_helper_resume - subsystem-level resume helper
3334 * @dev: DRM device
3335 * @state: atomic state to resume to
3336 *
3337 * Calls drm_mode_config_reset() to synchronize hardware and software states,
3338 * grabs all modeset locks and commits the atomic state object. This can be
3339 * used in conjunction with the drm_atomic_helper_suspend() helper to
3340 * implement suspend/resume for drivers that support atomic mode-setting.
3341 *
3342 * Returns:
3343 * 0 on success or a negative error code on failure.
3344 *
3345 * See also:
3346 * drm_atomic_helper_suspend()
3347 */
int drm_atomic_helper_resume(struct drm_device *dev,
			     struct drm_atomic_state *state)
{
	struct drm_modeset_acquire_ctx ctx;
	int err;

	/* Re-sync software state with the hardware after suspend. */
	drm_mode_config_reset(dev);

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);

	err = drm_atomic_helper_commit_duplicated_state(state, &ctx);

	DRM_MODESET_LOCK_ALL_END(ctx, err);
	/* The caller's reference on @state is consumed here either way. */
	drm_atomic_state_put(state);

	return err;
}
EXPORT_SYMBOL(drm_atomic_helper_resume);
3366
/*
 * Shared implementation of the legacy page-flip helpers: points the
 * primary plane of @crtc at @fb, attaches @event and @flags to the new
 * CRTC state, forbids full modesets and rejects flips on a disabled CRTC.
 * Does not commit — the callers do that.
 */
static int page_flip_common(struct drm_atomic_state *state,
			    struct drm_crtc *crtc,
			    struct drm_framebuffer *fb,
			    struct drm_pending_vblank_event *event,
			    uint32_t flags)
{
	struct drm_plane *plane = crtc->primary;
	struct drm_plane_state *plane_state;
	struct drm_crtc_state *crtc_state;
	int ret = 0;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	crtc_state->event = event;
	crtc_state->pageflip_flags = flags;

	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state))
		return PTR_ERR(plane_state);

	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
	if (ret != 0)
		return ret;
	drm_atomic_set_fb_for_plane(plane_state, fb);

	/* Make sure we don't accidentally do a full modeset. */
	state->allow_modeset = false;
	if (!crtc_state->active) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled, rejecting legacy flip\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	return ret;
}
3404
3405/**
3406 * drm_atomic_helper_page_flip - execute a legacy page flip
3407 * @crtc: DRM crtc
3408 * @fb: DRM framebuffer
3409 * @event: optional DRM event to signal upon completion
3410 * @flags: flip flags for non-vblank sync'ed updates
3411 * @ctx: lock acquisition context
3412 *
3413 * Provides a default &drm_crtc_funcs.page_flip implementation
3414 * using the atomic driver interface.
3415 *
3416 * Returns:
3417 * Returns 0 on success, negative errno numbers on failure.
3418 *
3419 * See also:
3420 * drm_atomic_helper_page_flip_target()
3421 */
int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t flags,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_plane *plane = crtc->primary;
	struct drm_atomic_state *state;
	int ret = 0;

	state = drm_atomic_state_alloc(plane->dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;

	/* Build the flip state (primary plane -> @fb, event attached). */
	ret = page_flip_common(state, crtc, fb, event, flags);
	if (ret != 0)
		goto fail;

	/* Flips are queued with a nonblocking commit. */
	ret = drm_atomic_nonblocking_commit(state);
fail:
	drm_atomic_state_put(state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_page_flip);
3448
3449/**
3450 * drm_atomic_helper_page_flip_target - do page flip on target vblank period.
3451 * @crtc: DRM crtc
3452 * @fb: DRM framebuffer
3453 * @event: optional DRM event to signal upon completion
3454 * @flags: flip flags for non-vblank sync'ed updates
3455 * @target: specifying the target vblank period when the flip to take effect
3456 * @ctx: lock acquisition context
3457 *
3458 * Provides a default &drm_crtc_funcs.page_flip_target implementation.
3459 * Similar to drm_atomic_helper_page_flip() with extra parameter to specify
3460 * target vblank period to flip.
3461 *
3462 * Returns:
3463 * Returns 0 on success, negative errno numbers on failure.
3464 */
int drm_atomic_helper_page_flip_target(struct drm_crtc *crtc,
				       struct drm_framebuffer *fb,
				       struct drm_pending_vblank_event *event,
				       uint32_t flags,
				       uint32_t target,
				       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_plane *plane = crtc->primary;
	struct drm_atomic_state *state;
	struct drm_crtc_state *crtc_state;
	int ret = 0;

	state = drm_atomic_state_alloc(plane->dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;

	ret = page_flip_common(state, crtc, fb, event, flags);
	if (ret != 0)
		goto fail;

	/* page_flip_common() already added the CRTC state, so a failed
	 * lookup here can only mean a bug — hence the WARN_ON. */
	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	if (WARN_ON(!crtc_state)) {
		ret = -EINVAL;
		goto fail;
	}
	crtc_state->target_vblank = target;

	ret = drm_atomic_nonblocking_commit(state);
fail:
	drm_atomic_state_put(state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_page_flip_target);
3500
3501/**
3502 * drm_atomic_helper_legacy_gamma_set - set the legacy gamma correction table
3503 * @crtc: CRTC object
3504 * @red: red correction table
3505 * @green: green correction table
 * @blue: blue correction table
3507 * @size: size of the tables
3508 * @ctx: lock acquire context
3509 *
3510 * Implements support for legacy gamma correction table for drivers
3511 * that support color management through the DEGAMMA_LUT/GAMMA_LUT
3512 * properties. See drm_crtc_enable_color_mgmt() and the containing chapter for
3513 * how the atomic color management and gamma tables work.
3514 */
int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
				       u16 *red, u16 *green, u16 *blue,
				       uint32_t size,
				       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->dev;
	struct drm_atomic_state *state;
	struct drm_crtc_state *crtc_state;
	struct drm_property_blob *blob = NULL;
	struct drm_color_lut *blob_data;
	int i, ret = 0;
	bool replaced;

	state = drm_atomic_state_alloc(crtc->dev);
	if (!state)
		return -ENOMEM;

	/* One drm_color_lut entry per legacy LUT entry. */
	blob = drm_property_create_blob(dev,
					sizeof(struct drm_color_lut) * size,
					NULL);
	if (IS_ERR(blob)) {
		ret = PTR_ERR(blob);
		/* NULL here so the unconditional put below is a no-op. */
		blob = NULL;
		goto fail;
	}

	/* Prepare GAMMA_LUT with the legacy values. */
	blob_data = blob->data;
	for (i = 0; i < size; i++) {
		blob_data[i].red = red[i];
		blob_data[i].green = green[i];
		blob_data[i].blue = blue[i];
	}

	state->acquire_ctx = ctx;
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	/* Reset DEGAMMA_LUT and CTM properties. */
	replaced = drm_property_replace_blob(&crtc_state->degamma_lut, NULL);
	replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL);
	replaced |= drm_property_replace_blob(&crtc_state->gamma_lut, blob);
	crtc_state->color_mgmt_changed |= replaced;

	ret = drm_atomic_commit(state);

fail:
	drm_atomic_state_put(state);
	/* Drop our creation reference; the crtc_state presumably holds its
	 * own after drm_property_replace_blob() — confirm if changing this. */
	drm_property_blob_put(blob);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set);