Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/*
2 * Copyright (c) 2014 Samsung Electronics Co., Ltd
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sub license,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the
12 * next paragraph) shall be included in all copies or substantial portions
13 * of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24#include <linux/debugfs.h>
25#include <linux/err.h>
26#include <linux/media-bus-format.h>
27#include <linux/module.h>
28#include <linux/mutex.h>
29
30#include <drm/drm_atomic_state_helper.h>
31#include <drm/drm_bridge.h>
32#include <drm/drm_debugfs.h>
33#include <drm/drm_edid.h>
34#include <drm/drm_encoder.h>
35#include <drm/drm_file.h>
36#include <drm/drm_of.h>
37#include <drm/drm_print.h>
38
39#include "drm_crtc_internal.h"
40
41/**
42 * DOC: overview
43 *
44 * &struct drm_bridge represents a device that hangs on to an encoder. These are
45 * handy when a regular &drm_encoder entity isn't enough to represent the entire
46 * encoder chain.
47 *
48 * A bridge is always attached to a single &drm_encoder at a time, but can be
49 * either connected to it directly, or through a chain of bridges::
50 *
51 * [ CRTC ---> ] Encoder ---> Bridge A ---> Bridge B
52 *
53 * Here, the output of the encoder feeds to bridge A, and that further feeds to
54 * bridge B. Bridge chains can be arbitrarily long, and shall be fully linear:
55 * Chaining multiple bridges to the output of a bridge, or the same bridge to
56 * the output of different bridges, is not supported.
57 *
58 * &drm_bridge, like &drm_panel, aren't &drm_mode_object entities like planes,
59 * CRTCs, encoders or connectors and hence are not visible to userspace. They
60 * just provide additional hooks to get the desired output at the end of the
61 * encoder chain.
62 */
63
64/**
65 * DOC: display driver integration
66 *
67 * Display drivers are responsible for linking encoders with the first bridge
68 * in the chains. This is done by acquiring the appropriate bridge with
69 * devm_drm_of_get_bridge(). Once acquired, the bridge shall be attached to the
70 * encoder with a call to drm_bridge_attach().
71 *
72 * Bridges are responsible for linking themselves with the next bridge in the
73 * chain, if any. This is done the same way as for encoders, with the call to
74 * drm_bridge_attach() occurring in the &drm_bridge_funcs.attach operation.
75 *
76 * Once these links are created, the bridges can participate along with encoder
77 * functions to perform mode validation and fixup (through
78 * drm_bridge_chain_mode_valid() and drm_atomic_bridge_chain_check()), mode
79 * setting (through drm_bridge_chain_mode_set()), enable (through
80 * drm_atomic_bridge_chain_pre_enable() and drm_atomic_bridge_chain_enable())
81 * and disable (through drm_atomic_bridge_chain_disable() and
82 * drm_atomic_bridge_chain_post_disable()). Those functions call the
83 * corresponding operations provided in &drm_bridge_funcs in sequence for all
84 * bridges in the chain.
85 *
86 * For display drivers that use the atomic helpers
87 * drm_atomic_helper_check_modeset(),
88 * drm_atomic_helper_commit_modeset_enables() and
89 * drm_atomic_helper_commit_modeset_disables() (either directly in hand-rolled
90 * commit check and commit tail handlers, or through the higher-level
91 * drm_atomic_helper_check() and drm_atomic_helper_commit_tail() or
92 * drm_atomic_helper_commit_tail_rpm() helpers), this is done transparently and
93 * requires no intervention from the driver. For other drivers, the relevant
94 * DRM bridge chain functions shall be called manually.
95 *
96 * Bridges also participate in implementing the &drm_connector at the end of
97 * the bridge chain. Display drivers may use the drm_bridge_connector_init()
98 * helper to create the &drm_connector, or implement it manually on top of the
99 * connector-related operations exposed by the bridge (see the overview
100 * documentation of bridge operations for more details).
101 */
102
103/**
104 * DOC: special care dsi
105 *
106 * The interaction between the bridges and other frameworks involved in
107 * the probing of the upstream driver and the bridge driver can be
108 * challenging. Indeed, there are multiple cases that need to be
109 * considered:
110 *
111 * - The upstream driver doesn't use the component framework and isn't a
112 * MIPI-DSI host. In this case, the bridge driver will probe at some
113 * point and the upstream driver should try to probe again by returning
114 * EPROBE_DEFER as long as the bridge driver hasn't probed.
115 *
116 * - The upstream driver doesn't use the component framework, but is a
117 * MIPI-DSI host. The bridge device uses the MIPI-DCS commands to be
118 * controlled. In this case, the bridge device is a child of the
119 * display device and when it will probe it's assured that the display
120 * device (and MIPI-DSI host) is present. The upstream driver will be
121 * assured that the bridge driver is connected between the
122 * &mipi_dsi_host_ops.attach and &mipi_dsi_host_ops.detach operations.
123 * Therefore, it must run mipi_dsi_host_register() in its probe
124 * function, and then run drm_bridge_attach() in its
125 * &mipi_dsi_host_ops.attach hook.
126 *
127 * - The upstream driver uses the component framework and is a MIPI-DSI
128 * host. The bridge device uses the MIPI-DCS commands to be
129 * controlled. This is the same situation as above, and can run
130 * mipi_dsi_host_register() in either its probe or bind hooks.
131 *
132 * - The upstream driver uses the component framework and is a MIPI-DSI
133 * host. The bridge device uses a separate bus (such as I2C) to be
134 * controlled. In this case, there's no correlation between the probe
135 * of the bridge and upstream drivers, so care must be taken to avoid
136 * an endless EPROBE_DEFER loop, with each driver waiting for the
137 * other to probe.
138 *
139 * The ideal pattern to cover the last item (and all the others in the
140 * MIPI-DSI host driver case) is to split the operations like this:
141 *
142 * - The MIPI-DSI host driver must run mipi_dsi_host_register() in its
143 * probe hook. It will make sure that the MIPI-DSI host sticks around,
144 * and that the driver's bind can be called.
145 *
146 * - In its probe hook, the bridge driver must try to find its MIPI-DSI
147 * host, register as a MIPI-DSI device and attach the MIPI-DSI device
148 * to its host. The bridge driver is now functional.
149 *
150 * - In its &struct mipi_dsi_host_ops.attach hook, the MIPI-DSI host can
151 * now add its component. Its bind hook will now be called and since
152 * the bridge driver is attached and registered, we can now look for
153 * and attach it.
154 *
155 * At this point, we're now certain that both the upstream driver and
156 * the bridge driver are functional and we can't have a deadlock-like
157 * situation when probing.
158 */
159
160/**
161 * DOC: dsi bridge operations
162 *
163 * DSI host interfaces are expected to be implemented as bridges rather than
164 * encoders, however there are a few aspects of their operation that need to
165 * be defined in order to provide a consistent interface.
166 *
167 * A DSI host should keep the PHY powered down until the pre_enable operation is
168 * called. All lanes are in an undefined idle state up to this point, and it
169 * must not be assumed that it is LP-11.
170 * pre_enable should initialise the PHY, set the data lanes to LP-11, and the
171 * clock lane to either LP-11 or HS depending on the mode_flag
172 * %MIPI_DSI_CLOCK_NON_CONTINUOUS.
173 *
174 * Ordinarily the downstream bridge DSI peripheral pre_enable will have been
175 * called before the DSI host. If the DSI peripheral requires LP-11 and/or
176 * the clock lane to be in HS mode prior to pre_enable, then it can set the
177 * &pre_enable_prev_first flag to request the pre_enable (and
178 * post_disable) order to be altered to enable the DSI host first.
179 *
180 * Either the CRTC being enabled, or the DSI host enable operation should switch
181 * the host to actively transmitting video on the data lanes.
182 *
183 * The reverse also applies. The DSI host disable operation or stopping the CRTC
184 * should stop transmitting video, and the data lanes should return to the LP-11
185 * state. The DSI host &post_disable operation should disable the PHY.
186 * If the &pre_enable_prev_first flag is set, then the DSI peripheral's
187 * bridge &post_disable will be called before the DSI host's post_disable.
188 *
189 * Whilst it is valid to call &host_transfer prior to pre_enable or after
190 * post_disable, the exact state of the lanes is undefined at this point. The
191 * DSI host should initialise the interface, transmit the data, and then disable
192 * the interface again.
193 *
194 * Ultra Low Power State (ULPS) is not explicitly supported by DRM. If
195 * implemented, it therefore needs to be handled entirely within the DSI Host
196 * driver.
197 */
198
/* Global registry of all registered bridges; bridge_list is protected by bridge_lock. */
static DEFINE_MUTEX(bridge_lock);
static LIST_HEAD(bridge_list);
201
/*
 * Final release callback invoked by kref_put() when the last reference to a
 * bridge is dropped. Frees the driver structure embedding the bridge (the
 * whole allocation made by __devm_drm_bridge_alloc()), not just the bridge.
 */
static void __drm_bridge_free(struct kref *kref)
{
	struct drm_bridge *bridge = container_of(kref, struct drm_bridge, refcount);

	kfree(bridge->container);
}
208
209/**
210 * drm_bridge_get - Acquire a bridge reference
211 * @bridge: DRM bridge
212 *
213 * This function increments the bridge's refcount.
214 *
215 * Returns:
216 * Pointer to @bridge.
217 */
218struct drm_bridge *drm_bridge_get(struct drm_bridge *bridge)
219{
220 if (bridge)
221 kref_get(&bridge->refcount);
222
223 return bridge;
224}
225EXPORT_SYMBOL(drm_bridge_get);
226
227/**
228 * drm_bridge_put - Release a bridge reference
229 * @bridge: DRM bridge
230 *
231 * This function decrements the bridge's reference count and frees the
232 * object if the reference count drops to zero.
233 */
234void drm_bridge_put(struct drm_bridge *bridge)
235{
236 if (bridge)
237 kref_put(&bridge->refcount, __drm_bridge_free);
238}
239EXPORT_SYMBOL(drm_bridge_put);
240
/**
 * drm_bridge_put_void - wrapper to drm_bridge_put() taking a void pointer
 *
 * @data: pointer to @struct drm_bridge, cast to a void pointer
 *
 * Wrapper of drm_bridge_put() to be used when a function taking a void
 * pointer is needed, for example as a devm action.
 */
static void drm_bridge_put_void(void *data)
{
	/* A void * converts implicitly in C; the explicit cast was redundant. */
	drm_bridge_put(data);
}
255
256void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset,
257 const struct drm_bridge_funcs *funcs)
258{
259 void *container;
260 struct drm_bridge *bridge;
261 int err;
262
263 if (!funcs) {
264 dev_warn(dev, "Missing funcs pointer\n");
265 return ERR_PTR(-EINVAL);
266 }
267
268 container = kzalloc(size, GFP_KERNEL);
269 if (!container)
270 return ERR_PTR(-ENOMEM);
271
272 bridge = container + offset;
273 bridge->container = container;
274 bridge->funcs = funcs;
275 kref_init(&bridge->refcount);
276
277 err = devm_add_action_or_reset(dev, drm_bridge_put_void, bridge);
278 if (err)
279 return ERR_PTR(err);
280
281 return container;
282}
283EXPORT_SYMBOL(__devm_drm_bridge_alloc);
284
/**
 * drm_bridge_add - add the given bridge to the global bridge list
 *
 * @bridge: bridge control structure
 *
 * The bridge to be added must have been allocated by
 * devm_drm_bridge_alloc().
 */
void drm_bridge_add(struct drm_bridge *bridge)
{
	mutex_init(&bridge->hpd_mutex);

	/*
	 * For HDMI bridges, derive YCbCr 4:2:0 support from the colorspace
	 * bitmask the driver advertised in supported_formats.
	 */
	if (bridge->ops & DRM_BRIDGE_OP_HDMI)
		bridge->ycbcr_420_allowed = !!(bridge->supported_formats &
					       BIT(HDMI_COLORSPACE_YUV420));

	mutex_lock(&bridge_lock);
	list_add_tail(&bridge->list, &bridge_list);
	mutex_unlock(&bridge_lock);
}
EXPORT_SYMBOL(drm_bridge_add);
306
/* void-pointer adapter around drm_bridge_remove(), for use as a devm action. */
static void drm_bridge_remove_void(void *bridge)
{
	drm_bridge_remove(bridge);
}
311
/**
 * devm_drm_bridge_add - devm managed version of drm_bridge_add()
 *
 * @dev: device to tie the bridge lifetime to
 * @bridge: bridge control structure
 *
 * This is the managed version of drm_bridge_add() which automatically
 * calls drm_bridge_remove() when @dev is unbound.
 *
 * Return: 0 if no error or negative error code.
 */
int devm_drm_bridge_add(struct device *dev, struct drm_bridge *bridge)
{
	drm_bridge_add(bridge);
	/* If registering the action fails, the bridge is removed immediately. */
	return devm_add_action_or_reset(dev, drm_bridge_remove_void, bridge);
}
EXPORT_SYMBOL(devm_drm_bridge_add);
329
/**
 * drm_bridge_remove - remove the given bridge from the global bridge list
 *
 * @bridge: bridge control structure
 */
void drm_bridge_remove(struct drm_bridge *bridge)
{
	mutex_lock(&bridge_lock);
	list_del_init(&bridge->list);
	mutex_unlock(&bridge_lock);

	/* Undo the mutex_init() done in drm_bridge_add(). */
	mutex_destroy(&bridge->hpd_mutex);
}
EXPORT_SYMBOL(drm_bridge_remove);
344
345static struct drm_private_state *
346drm_bridge_atomic_duplicate_priv_state(struct drm_private_obj *obj)
347{
348 struct drm_bridge *bridge = drm_priv_to_bridge(obj);
349 struct drm_bridge_state *state;
350
351 state = bridge->funcs->atomic_duplicate_state(bridge);
352 return state ? &state->base : NULL;
353}
354
355static void
356drm_bridge_atomic_destroy_priv_state(struct drm_private_obj *obj,
357 struct drm_private_state *s)
358{
359 struct drm_bridge_state *state = drm_priv_to_bridge_state(s);
360 struct drm_bridge *bridge = drm_priv_to_bridge(obj);
361
362 bridge->funcs->atomic_destroy_state(bridge, state);
363}
364
/* Glue plugging bridge states into the DRM atomic private-object machinery. */
static const struct drm_private_state_funcs drm_bridge_priv_state_funcs = {
	.atomic_duplicate_state = drm_bridge_atomic_duplicate_priv_state,
	.atomic_destroy_state = drm_bridge_atomic_destroy_priv_state,
};
369
370static bool drm_bridge_is_atomic(struct drm_bridge *bridge)
371{
372 return bridge->funcs->atomic_reset != NULL;
373}
374
/**
 * drm_bridge_attach - attach the bridge to an encoder's chain
 *
 * @encoder: DRM encoder
 * @bridge: bridge to attach
 * @previous: previous bridge in the chain (optional)
 * @flags: DRM_BRIDGE_ATTACH_* flags
 *
 * Called by a kms driver to link the bridge to an encoder's chain. The previous
 * argument specifies the previous bridge in the chain. If NULL, the bridge is
 * linked directly at the encoder's output. Otherwise it is linked at the
 * previous bridge's output.
 *
 * If non-NULL the previous bridge must be already attached by a call to this
 * function.
 *
 * Note that bridges attached to encoders are auto-detached during encoder
 * cleanup in drm_encoder_cleanup(), so drm_bridge_attach() should generally
 * *not* be balanced with a drm_bridge_detach() in driver code.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
		      struct drm_bridge *previous,
		      enum drm_bridge_attach_flags flags)
{
	int ret;

	if (!encoder || !bridge)
		return -EINVAL;

	/* @previous, if given, must already be attached to this encoder. */
	if (previous && (!previous->dev || previous->encoder != encoder))
		return -EINVAL;

	/* A bridge can only be attached to one encoder at a time. */
	if (bridge->dev)
		return -EBUSY;

	bridge->dev = encoder->dev;
	bridge->encoder = encoder;

	/* Link after @previous, or at the head of the encoder's chain. */
	if (previous)
		list_add(&bridge->chain_node, &previous->chain_node);
	else
		list_add(&bridge->chain_node, &encoder->bridge_chain);

	if (bridge->funcs->attach) {
		ret = bridge->funcs->attach(bridge, encoder, flags);
		if (ret < 0)
			goto err_reset_bridge;
	}

	/* Atomic bridges get a private state object backing their state. */
	if (drm_bridge_is_atomic(bridge)) {
		struct drm_bridge_state *state;

		state = bridge->funcs->atomic_reset(bridge);
		if (IS_ERR(state)) {
			ret = PTR_ERR(state);
			goto err_detach_bridge;
		}

		drm_atomic_private_obj_init(bridge->dev, &bridge->base,
					    &state->base,
					    &drm_bridge_priv_state_funcs);
	}

	return 0;

err_detach_bridge:
	if (bridge->funcs->detach)
		bridge->funcs->detach(bridge);

err_reset_bridge:
	bridge->dev = NULL;
	bridge->encoder = NULL;
	list_del(&bridge->chain_node);

	/* Probe deferral is expected: report it quietly via dev_err_probe(). */
	if (ret != -EPROBE_DEFER)
		DRM_ERROR("failed to attach bridge %pOF to encoder %s: %d\n",
			  bridge->of_node, encoder->name, ret);
	else
		dev_err_probe(encoder->dev->dev, -EPROBE_DEFER,
			      "failed to attach bridge %pOF to encoder %s\n",
			      bridge->of_node, encoder->name);

	return ret;
}
EXPORT_SYMBOL(drm_bridge_attach);
463
/*
 * drm_bridge_detach - detach @bridge from its encoder
 *
 * Reverses drm_bridge_attach(): tears down the atomic private state object
 * (if any), calls the optional &drm_bridge_funcs.detach hook, removes the
 * bridge from the encoder chain and clears the device link. Called from
 * DRM core code (see drm_crtc_internal.h); not exported to drivers.
 */
void drm_bridge_detach(struct drm_bridge *bridge)
{
	if (WARN_ON(!bridge))
		return;

	/* Detaching a bridge that was never attached is a caller bug. */
	if (WARN_ON(!bridge->dev))
		return;

	if (drm_bridge_is_atomic(bridge))
		drm_atomic_private_obj_fini(&bridge->base);

	if (bridge->funcs->detach)
		bridge->funcs->detach(bridge);

	list_del(&bridge->chain_node);
	bridge->dev = NULL;
}
481
482/**
483 * DOC: bridge operations
484 *
485 * Bridge drivers expose operations through the &drm_bridge_funcs structure.
486 * The DRM internals (atomic and CRTC helpers) use the helpers defined in
487 * drm_bridge.c to call bridge operations. Those operations are divided in
488 * three big categories to support different parts of the bridge usage.
489 *
490 * - The encoder-related operations support control of the bridges in the
491 * chain, and are roughly counterparts to the &drm_encoder_helper_funcs
492 * operations. They are used by the legacy CRTC and the atomic modeset
493 * helpers to perform mode validation, fixup and setting, and enable and
494 * disable the bridge automatically.
495 *
496 * The enable and disable operations are split in
497 * &drm_bridge_funcs.pre_enable, &drm_bridge_funcs.enable,
498 * &drm_bridge_funcs.disable and &drm_bridge_funcs.post_disable to provide
499 * finer-grained control.
500 *
501 * Bridge drivers may implement the legacy version of those operations, or
502 * the atomic version (prefixed with atomic\_), in which case they shall also
503 * implement the atomic state bookkeeping operations
504 * (&drm_bridge_funcs.atomic_duplicate_state,
505 * &drm_bridge_funcs.atomic_destroy_state and &drm_bridge_funcs.atomic_reset).
506 * Mixing atomic and non-atomic versions of the operations is not supported.
507 *
508 * - The bus format negotiation operations
509 * &drm_bridge_funcs.atomic_get_output_bus_fmts and
510 * &drm_bridge_funcs.atomic_get_input_bus_fmts allow bridge drivers to
511 * negotiate the formats transmitted between bridges in the chain when
512 * multiple formats are supported. Negotiation for formats is performed
513 * transparently for display drivers by the atomic modeset helpers. Only
514 * atomic versions of those operations exist, bridge drivers that need to
515 * implement them shall thus also implement the atomic version of the
516 * encoder-related operations. This feature is not supported by the legacy
517 * CRTC helpers.
518 *
519 * - The connector-related operations support implementing a &drm_connector
520 * based on a chain of bridges. DRM bridges traditionally create a
521 * &drm_connector for bridges meant to be used at the end of the chain. This
522 * puts additional burden on bridge drivers, especially for bridges that may
523 * be used in the middle of a chain or at the end of it. Furthermore, it
524 * requires all operations of the &drm_connector to be handled by a single
525 * bridge, which doesn't always match the hardware architecture.
526 *
527 * To simplify bridge drivers and make the connector implementation more
528 * flexible, a new model allows bridges to unconditionally skip creation of
529 * &drm_connector and instead expose &drm_bridge_funcs operations to support
530 * an externally-implemented &drm_connector. Those operations are
531 * &drm_bridge_funcs.detect, &drm_bridge_funcs.get_modes,
532 * &drm_bridge_funcs.get_edid, &drm_bridge_funcs.hpd_notify,
533 * &drm_bridge_funcs.hpd_enable and &drm_bridge_funcs.hpd_disable. When
534 * implemented, display drivers shall create a &drm_connector instance for
535 * each chain of bridges, and implement those connector instances based on
536 * the bridge connector operations.
537 *
538 * Bridge drivers shall implement the connector-related operations for all
539 * the features that the bridge hardware support. For instance, if a bridge
540 * supports reading EDID, the &drm_bridge_funcs.get_edid shall be
541 * implemented. This however doesn't mean that the DDC lines are wired to the
542 * bridge on a particular platform, as they could also be connected to an I2C
543 * controller of the SoC. Support for the connector-related operations on the
544 * running platform is reported through the &drm_bridge.ops flags. Bridge
545 * drivers shall detect which operations they can support on the platform
546 * (usually this information is provided by ACPI or DT), and set the
547 * &drm_bridge.ops flags for all supported operations. A flag shall only be
548 * set if the corresponding &drm_bridge_funcs operation is implemented, but
549 * an implemented operation doesn't necessarily imply that the corresponding
550 * flag will be set. Display drivers shall use the &drm_bridge.ops flags to
551 * decide which bridge to delegate a connector operation to. This mechanism
552 * allows providing a single static const &drm_bridge_funcs instance in
553 * bridge drivers, improving security by storing function pointers in
554 * read-only memory.
555 *
556 * In order to ease transition, bridge drivers may support both the old and
557 * new models by making connector creation optional and implementing the
558 * connector-related bridge operations. Connector creation is then controlled
559 * by the flags argument to the drm_bridge_attach() function. Display drivers
560 * that support the new model and create connectors themselves shall set the
561 * %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag, and bridge drivers shall then skip
562 * connector creation. For intermediate bridges in the chain, the flag shall
563 * be passed to the drm_bridge_attach() call for the downstream bridge.
564 * Bridge drivers that implement the new model only shall return an error
565 * from their &drm_bridge_funcs.attach handler when the
566 * %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag is not set. New display drivers
567 * should use the new model, and convert the bridge drivers they use if
568 * needed, in order to gradually transition to the new model.
569 */
570
571/**
572 * drm_bridge_chain_mode_valid - validate the mode against all bridges in the
573 * encoder chain.
574 * @bridge: bridge control structure
575 * @info: display info against which the mode shall be validated
576 * @mode: desired mode to be validated
577 *
578 * Calls &drm_bridge_funcs.mode_valid for all the bridges in the encoder
579 * chain, starting from the first bridge to the last. If at least one bridge
580 * does not accept the mode the function returns the error code.
581 *
582 * Note: the bridge passed should be the one closest to the encoder.
583 *
584 * RETURNS:
585 * MODE_OK on success, drm_mode_status Enum error code on failure
586 */
587enum drm_mode_status
588drm_bridge_chain_mode_valid(struct drm_bridge *bridge,
589 const struct drm_display_info *info,
590 const struct drm_display_mode *mode)
591{
592 struct drm_encoder *encoder;
593
594 if (!bridge)
595 return MODE_OK;
596
597 encoder = bridge->encoder;
598 list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
599 enum drm_mode_status ret;
600
601 if (!bridge->funcs->mode_valid)
602 continue;
603
604 ret = bridge->funcs->mode_valid(bridge, info, mode);
605 if (ret != MODE_OK)
606 return ret;
607 }
608
609 return MODE_OK;
610}
611EXPORT_SYMBOL(drm_bridge_chain_mode_valid);
612
613/**
614 * drm_bridge_chain_mode_set - set proposed mode for all bridges in the
615 * encoder chain
616 * @bridge: bridge control structure
617 * @mode: desired mode to be set for the encoder chain
618 * @adjusted_mode: updated mode that works for this encoder chain
619 *
620 * Calls &drm_bridge_funcs.mode_set op for all the bridges in the
621 * encoder chain, starting from the first bridge to the last.
622 *
623 * Note: the bridge passed should be the one closest to the encoder
624 */
625void drm_bridge_chain_mode_set(struct drm_bridge *bridge,
626 const struct drm_display_mode *mode,
627 const struct drm_display_mode *adjusted_mode)
628{
629 struct drm_encoder *encoder;
630
631 if (!bridge)
632 return;
633
634 encoder = bridge->encoder;
635 list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
636 if (bridge->funcs->mode_set)
637 bridge->funcs->mode_set(bridge, mode, adjusted_mode);
638 }
639}
640EXPORT_SYMBOL(drm_bridge_chain_mode_set);
641
642/**
643 * drm_atomic_bridge_chain_disable - disables all bridges in the encoder chain
644 * @bridge: bridge control structure
645 * @state: atomic state being committed
646 *
647 * Calls &drm_bridge_funcs.atomic_disable (falls back on
648 * &drm_bridge_funcs.disable) op for all the bridges in the encoder chain,
649 * starting from the last bridge to the first. These are called before calling
650 * &drm_encoder_helper_funcs.atomic_disable
651 *
652 * Note: the bridge passed should be the one closest to the encoder
653 */
654void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
655 struct drm_atomic_state *state)
656{
657 struct drm_encoder *encoder;
658 struct drm_bridge *iter;
659
660 if (!bridge)
661 return;
662
663 encoder = bridge->encoder;
664 list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
665 if (iter->funcs->atomic_disable) {
666 iter->funcs->atomic_disable(iter, state);
667 } else if (iter->funcs->disable) {
668 iter->funcs->disable(iter);
669 }
670
671 if (iter == bridge)
672 break;
673 }
674}
675EXPORT_SYMBOL(drm_atomic_bridge_chain_disable);
676
677static void drm_atomic_bridge_call_post_disable(struct drm_bridge *bridge,
678 struct drm_atomic_state *state)
679{
680 if (state && bridge->funcs->atomic_post_disable)
681 bridge->funcs->atomic_post_disable(bridge, state);
682 else if (bridge->funcs->post_disable)
683 bridge->funcs->post_disable(bridge);
684}
685
/**
 * drm_atomic_bridge_chain_post_disable - cleans up after disabling all bridges
 *					  in the encoder chain
 * @bridge: bridge control structure
 * @state: atomic state being committed
 *
 * Calls &drm_bridge_funcs.atomic_post_disable (falls back on
 * &drm_bridge_funcs.post_disable) op for all the bridges in the encoder chain,
 * starting from the first bridge to the last. These are called after completing
 * &drm_encoder_helper_funcs.atomic_disable
 *
 * If a bridge sets @pre_enable_prev_first, then the @post_disable for that
 * bridge will be called before the previous one to reverse the @pre_enable
 * calling direction.
 *
 * Example:
 * Bridge A ---> Bridge B ---> Bridge C ---> Bridge D ---> Bridge E
 *
 * With pre_enable_prev_first flag enable in Bridge B, D, E then the resulting
 * @post_disable order would be,
 * Bridge B, Bridge A, Bridge E, Bridge D, Bridge C.
 *
 * Note: the bridge passed should be the one closest to the encoder
 */
void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
					  struct drm_atomic_state *state)
{
	struct drm_encoder *encoder;
	struct drm_bridge *next, *limit;

	if (!bridge)
		return;

	encoder = bridge->encoder;

	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
		/* limit is set when a reversed sub-run has been processed. */
		limit = NULL;

		if (!list_is_last(&bridge->chain_node, &encoder->bridge_chain)) {
			next = list_next_entry(bridge, chain_node);

			if (next->pre_enable_prev_first) {
				/* next bridge had requested that prev
				 * was enabled first, so disabled last
				 */
				limit = next;

				/* Find the next bridge that has NOT requested
				 * prev to be enabled first / disabled last
				 */
				list_for_each_entry_from(next, &encoder->bridge_chain,
							 chain_node) {
					if (!next->pre_enable_prev_first) {
						/* Step back: this bridge is not
						 * part of the reversed run.
						 */
						next = list_prev_entry(next, chain_node);
						limit = next;
						break;
					}

					if (list_is_last(&next->chain_node,
							 &encoder->bridge_chain)) {
						limit = next;
						break;
					}
				}

				/* Call these bridges in reverse order */
				list_for_each_entry_from_reverse(next, &encoder->bridge_chain,
								 chain_node) {
					if (next == bridge)
						break;

					drm_atomic_bridge_call_post_disable(next,
									    state);
				}
			}
		}

		drm_atomic_bridge_call_post_disable(bridge, state);

		if (limit)
			/* Jump all bridges that we have already post_disabled */
			bridge = limit;
	}
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable);
771
772static void drm_atomic_bridge_call_pre_enable(struct drm_bridge *bridge,
773 struct drm_atomic_state *state)
774{
775 if (state && bridge->funcs->atomic_pre_enable)
776 bridge->funcs->atomic_pre_enable(bridge, state);
777 else if (bridge->funcs->pre_enable)
778 bridge->funcs->pre_enable(bridge);
779}
780
/**
 * drm_atomic_bridge_chain_pre_enable - prepares for enabling all bridges in
 *					the encoder chain
 * @bridge: bridge control structure
 * @state: atomic state being committed
 *
 * Calls &drm_bridge_funcs.atomic_pre_enable (falls back on
 * &drm_bridge_funcs.pre_enable) op for all the bridges in the encoder chain,
 * starting from the last bridge to the first. These are called before calling
 * &drm_encoder_helper_funcs.atomic_enable
 *
 * If a bridge sets @pre_enable_prev_first, then the pre_enable for the
 * prev bridge will be called before pre_enable of this bridge.
 *
 * Example:
 * Bridge A ---> Bridge B ---> Bridge C ---> Bridge D ---> Bridge E
 *
 * With pre_enable_prev_first flag enable in Bridge B, D, E then the resulting
 * @pre_enable order would be,
 * Bridge C, Bridge D, Bridge E, Bridge A, Bridge B.
 *
 * Note: the bridge passed should be the one closest to the encoder
 */
void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
					struct drm_atomic_state *state)
{
	struct drm_encoder *encoder;
	struct drm_bridge *iter, *next, *limit;

	if (!bridge)
		return;

	encoder = bridge->encoder;

	/* Walk the chain backwards (furthest from the encoder first). */
	list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
		if (iter->pre_enable_prev_first) {
			next = iter;
			limit = bridge;
			/* Scan backwards for the start of the run of bridges
			 * that all requested prev-first ordering.
			 */
			list_for_each_entry_from_reverse(next,
							 &encoder->bridge_chain,
							 chain_node) {
				if (next == bridge)
					break;

				if (!next->pre_enable_prev_first) {
					/* Found first bridge that does NOT
					 * request prev to be enabled first
					 */
					limit = next;
					break;
				}
			}

			list_for_each_entry_from(next, &encoder->bridge_chain, chain_node) {
				/* Call requested prev bridge pre_enable
				 * in order.
				 */
				if (next == iter)
					/* At the first bridge to request prev
					 * bridges called first.
					 */
					break;

				drm_atomic_bridge_call_pre_enable(next, state);
			}
		}

		drm_atomic_bridge_call_pre_enable(iter, state);

		if (iter->pre_enable_prev_first)
			/* Jump all bridges that we have already pre_enabled */
			iter = limit;

		if (iter == bridge)
			break;
	}
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_pre_enable);
859
860/**
861 * drm_atomic_bridge_chain_enable - enables all bridges in the encoder chain
862 * @bridge: bridge control structure
863 * @state: atomic state being committed
864 *
865 * Calls &drm_bridge_funcs.atomic_enable (falls back on
866 * &drm_bridge_funcs.enable) op for all the bridges in the encoder chain,
867 * starting from the first bridge to the last. These are called after completing
868 * &drm_encoder_helper_funcs.atomic_enable
869 *
870 * Note: the bridge passed should be the one closest to the encoder
871 */
872void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge,
873 struct drm_atomic_state *state)
874{
875 struct drm_encoder *encoder;
876
877 if (!bridge)
878 return;
879
880 encoder = bridge->encoder;
881 list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
882 if (bridge->funcs->atomic_enable) {
883 bridge->funcs->atomic_enable(bridge, state);
884 } else if (bridge->funcs->enable) {
885 bridge->funcs->enable(bridge);
886 }
887 }
888}
889EXPORT_SYMBOL(drm_atomic_bridge_chain_enable);
890
891static int drm_atomic_bridge_check(struct drm_bridge *bridge,
892 struct drm_crtc_state *crtc_state,
893 struct drm_connector_state *conn_state)
894{
895 if (bridge->funcs->atomic_check) {
896 struct drm_bridge_state *bridge_state;
897 int ret;
898
899 bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
900 bridge);
901 if (WARN_ON(!bridge_state))
902 return -EINVAL;
903
904 ret = bridge->funcs->atomic_check(bridge, bridge_state,
905 crtc_state, conn_state);
906 if (ret)
907 return ret;
908 } else if (bridge->funcs->mode_fixup) {
909 if (!bridge->funcs->mode_fixup(bridge, &crtc_state->mode,
910 &crtc_state->adjusted_mode))
911 return -EINVAL;
912 }
913
914 return 0;
915}
916
/*
 * Recursively negotiate input bus formats from @cur_bridge back to
 * @first_bridge, given the @out_bus_fmt requested on @cur_bridge's output.
 *
 * Returns 0 on success (filling the input/output bus formats of every
 * visited bridge state on the way), -ENOTSUPP when no working combination
 * exists for @out_bus_fmt, or another negative error code on failure.
 */
static int select_bus_fmt_recursive(struct drm_bridge *first_bridge,
				    struct drm_bridge *cur_bridge,
				    struct drm_crtc_state *crtc_state,
				    struct drm_connector_state *conn_state,
				    u32 out_bus_fmt)
{
	unsigned int i, num_in_bus_fmts = 0;
	struct drm_bridge_state *cur_state;
	struct drm_bridge *prev_bridge;
	u32 *in_bus_fmts;
	int ret;

	prev_bridge = drm_bridge_get_prev_bridge(cur_bridge);
	cur_state = drm_atomic_get_new_bridge_state(crtc_state->state,
						    cur_bridge);

	/*
	 * If bus format negotiation is not supported by this bridge, let's
	 * pass MEDIA_BUS_FMT_FIXED to the previous bridge in the chain and
	 * hope that it can handle this situation gracefully (by providing
	 * appropriate default values).
	 */
	if (!cur_bridge->funcs->atomic_get_input_bus_fmts) {
		if (cur_bridge != first_bridge) {
			ret = select_bus_fmt_recursive(first_bridge,
						       prev_bridge, crtc_state,
						       conn_state,
						       MEDIA_BUS_FMT_FIXED);
			if (ret)
				return ret;
		}

		/*
		 * Driver does not implement the atomic state hooks, but that's
		 * fine, as long as it does not access the bridge state.
		 */
		if (cur_state) {
			cur_state->input_bus_cfg.format = MEDIA_BUS_FMT_FIXED;
			cur_state->output_bus_cfg.format = out_bus_fmt;
		}

		return 0;
	}

	/*
	 * If the driver implements ->atomic_get_input_bus_fmts() it
	 * should also implement the atomic state hooks.
	 */
	if (WARN_ON(!cur_state))
		return -EINVAL;

	/* The returned array is allocated by the driver; we free it below. */
	in_bus_fmts = cur_bridge->funcs->atomic_get_input_bus_fmts(cur_bridge,
								   cur_state,
								   crtc_state,
								   conn_state,
								   out_bus_fmt,
								   &num_in_bus_fmts);
	if (!num_in_bus_fmts)
		return -ENOTSUPP;
	else if (!in_bus_fmts)
		return -ENOMEM;

	/*
	 * Reached the first bridge of the chain: any supported input format
	 * will do, pick the first one and end the recursion.
	 */
	if (first_bridge == cur_bridge) {
		cur_state->input_bus_cfg.format = in_bus_fmts[0];
		cur_state->output_bus_cfg.format = out_bus_fmt;
		kfree(in_bus_fmts);
		return 0;
	}

	/*
	 * Try each supported input format as the previous bridge's output
	 * format, stopping at the first one that works (or at any error other
	 * than -ENOTSUPP). num_in_bus_fmts >= 1 at this point, so the loop
	 * body runs at least once and @ret is always assigned.
	 */
	for (i = 0; i < num_in_bus_fmts; i++) {
		ret = select_bus_fmt_recursive(first_bridge, prev_bridge,
					       crtc_state, conn_state,
					       in_bus_fmts[i]);
		if (ret != -ENOTSUPP)
			break;
	}

	if (!ret) {
		cur_state->input_bus_cfg.format = in_bus_fmts[i];
		cur_state->output_bus_cfg.format = out_bus_fmt;
	}

	kfree(in_bus_fmts);
	return ret;
}
1002
1003/*
1004 * This function is called by &drm_atomic_bridge_chain_check() just before
1005 * calling &drm_bridge_funcs.atomic_check() on all elements of the chain.
1006 * It performs bus format negotiation between bridge elements. The negotiation
1007 * happens in reverse order, starting from the last element in the chain up to
1008 * @bridge.
1009 *
1010 * Negotiation starts by retrieving supported output bus formats on the last
1011 * bridge element and testing them one by one. The test is recursive, meaning
1012 * that for each tested output format, the whole chain will be walked backward,
1013 * and each element will have to choose an input bus format that can be
1014 * transcoded to the requested output format. When a bridge element does not
1015 * support transcoding into a specific output format -ENOTSUPP is returned and
1016 * the next bridge element will have to try a different format. If none of the
1017 * combinations worked, -ENOTSUPP is returned and the atomic modeset will fail.
1018 *
1019 * This implementation is relying on
1020 * &drm_bridge_funcs.atomic_get_output_bus_fmts() and
1021 * &drm_bridge_funcs.atomic_get_input_bus_fmts() to gather supported
1022 * input/output formats.
1023 *
1024 * When &drm_bridge_funcs.atomic_get_output_bus_fmts() is not implemented by
1025 * the last element of the chain, &drm_atomic_bridge_chain_select_bus_fmts()
1026 * tries a single format: &drm_connector.display_info.bus_formats[0] if
1027 * available, MEDIA_BUS_FMT_FIXED otherwise.
1028 *
1029 * When &drm_bridge_funcs.atomic_get_input_bus_fmts() is not implemented,
1030 * &drm_atomic_bridge_chain_select_bus_fmts() skips the negotiation on the
1031 * bridge element that lacks this hook and asks the previous element in the
1032 * chain to try MEDIA_BUS_FMT_FIXED. It's up to bridge drivers to decide what
1033 * to do in that case (fail if they want to enforce bus format negotiation, or
1034 * provide a reasonable default if they need to support pipelines where not
1035 * all elements support bus format negotiation).
1036 */
static int
drm_atomic_bridge_chain_select_bus_fmts(struct drm_bridge *bridge,
					struct drm_crtc_state *crtc_state,
					struct drm_connector_state *conn_state)
{
	struct drm_connector *conn = conn_state->connector;
	struct drm_encoder *encoder = bridge->encoder;
	struct drm_bridge_state *last_bridge_state;
	unsigned int i, num_out_bus_fmts = 0;
	struct drm_bridge *last_bridge;
	u32 *out_bus_fmts;
	int ret = 0;

	/* Negotiation starts at the last bridge, closest to the connector. */
	last_bridge = list_last_entry(&encoder->bridge_chain,
				      struct drm_bridge, chain_node);
	last_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
							    last_bridge);

	if (last_bridge->funcs->atomic_get_output_bus_fmts) {
		const struct drm_bridge_funcs *funcs = last_bridge->funcs;

		/*
		 * If the driver implements ->atomic_get_output_bus_fmts() it
		 * should also implement the atomic state hooks.
		 */
		if (WARN_ON(!last_bridge_state))
			return -EINVAL;

		/* Driver-allocated array; freed by us at the end. */
		out_bus_fmts = funcs->atomic_get_output_bus_fmts(last_bridge,
								 last_bridge_state,
								 crtc_state,
								 conn_state,
								 &num_out_bus_fmts);
		if (!num_out_bus_fmts)
			return -ENOTSUPP;
		else if (!out_bus_fmts)
			return -ENOMEM;
	} else {
		/*
		 * No output format hook: test a single candidate, either the
		 * connector's first reported bus format or
		 * MEDIA_BUS_FMT_FIXED as a last resort.
		 */
		num_out_bus_fmts = 1;
		out_bus_fmts = kmalloc(sizeof(*out_bus_fmts), GFP_KERNEL);
		if (!out_bus_fmts)
			return -ENOMEM;

		if (conn->display_info.num_bus_formats &&
		    conn->display_info.bus_formats)
			out_bus_fmts[0] = conn->display_info.bus_formats[0];
		else
			out_bus_fmts[0] = MEDIA_BUS_FMT_FIXED;
	}

	/*
	 * Try each candidate output format until the whole chain can be
	 * walked back with compatible input formats. -ENOTSUPP means "try
	 * the next candidate"; any other error aborts the negotiation.
	 */
	for (i = 0; i < num_out_bus_fmts; i++) {
		ret = select_bus_fmt_recursive(bridge, last_bridge, crtc_state,
					       conn_state, out_bus_fmts[i]);
		if (ret != -ENOTSUPP)
			break;
	}

	kfree(out_bus_fmts);

	return ret;
}
1098
1099static void
1100drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge,
1101 struct drm_connector *conn,
1102 struct drm_atomic_state *state)
1103{
1104 struct drm_bridge_state *bridge_state, *next_bridge_state;
1105 struct drm_bridge *next_bridge;
1106 u32 output_flags = 0;
1107
1108 bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
1109
1110 /* No bridge state attached to this bridge => nothing to propagate. */
1111 if (!bridge_state)
1112 return;
1113
1114 next_bridge = drm_bridge_get_next_bridge(bridge);
1115
1116 /*
1117 * Let's try to apply the most common case here, that is, propagate
1118 * display_info flags for the last bridge, and propagate the input
1119 * flags of the next bridge element to the output end of the current
1120 * bridge when the bridge is not the last one.
1121 * There are exceptions to this rule, like when signal inversion is
1122 * happening at the board level, but that's something drivers can deal
1123 * with from their &drm_bridge_funcs.atomic_check() implementation by
1124 * simply overriding the flags value we've set here.
1125 */
1126 if (!next_bridge) {
1127 output_flags = conn->display_info.bus_flags;
1128 } else {
1129 next_bridge_state = drm_atomic_get_new_bridge_state(state,
1130 next_bridge);
1131 /*
1132 * No bridge state attached to the next bridge, just leave the
1133 * flags to 0.
1134 */
1135 if (next_bridge_state)
1136 output_flags = next_bridge_state->input_bus_cfg.flags;
1137 }
1138
1139 bridge_state->output_bus_cfg.flags = output_flags;
1140
1141 /*
1142 * Propagate the output flags to the input end of the bridge. Again, it's
1143 * not necessarily what all bridges want, but that's what most of them
1144 * do, and by doing that by default we avoid forcing drivers to
1145 * duplicate the "dummy propagation" logic.
1146 */
1147 bridge_state->input_bus_cfg.flags = output_flags;
1148}
1149
1150/**
1151 * drm_atomic_bridge_chain_check() - Do an atomic check on the bridge chain
1152 * @bridge: bridge control structure
1153 * @crtc_state: new CRTC state
1154 * @conn_state: new connector state
1155 *
1156 * First trigger a bus format negotiation before calling
1157 * &drm_bridge_funcs.atomic_check() (falls back on
1158 * &drm_bridge_funcs.mode_fixup()) op for all the bridges in the encoder chain,
1159 * starting from the last bridge to the first. These are called before calling
1160 * &drm_encoder_helper_funcs.atomic_check()
1161 *
1162 * RETURNS:
1163 * 0 on success, a negative error code on failure
1164 */
1165int drm_atomic_bridge_chain_check(struct drm_bridge *bridge,
1166 struct drm_crtc_state *crtc_state,
1167 struct drm_connector_state *conn_state)
1168{
1169 struct drm_connector *conn = conn_state->connector;
1170 struct drm_encoder *encoder;
1171 struct drm_bridge *iter;
1172 int ret;
1173
1174 if (!bridge)
1175 return 0;
1176
1177 ret = drm_atomic_bridge_chain_select_bus_fmts(bridge, crtc_state,
1178 conn_state);
1179 if (ret)
1180 return ret;
1181
1182 encoder = bridge->encoder;
1183 list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
1184 int ret;
1185
1186 /*
1187 * Bus flags are propagated by default. If a bridge needs to
1188 * tweak the input bus flags for any reason, it should happen
1189 * in its &drm_bridge_funcs.atomic_check() implementation such
1190 * that preceding bridges in the chain can propagate the new
1191 * bus flags.
1192 */
1193 drm_atomic_bridge_propagate_bus_flags(iter, conn,
1194 crtc_state->state);
1195
1196 ret = drm_atomic_bridge_check(iter, crtc_state, conn_state);
1197 if (ret)
1198 return ret;
1199
1200 if (iter == bridge)
1201 break;
1202 }
1203
1204 return 0;
1205}
1206EXPORT_SYMBOL(drm_atomic_bridge_chain_check);
1207
1208/**
1209 * drm_bridge_detect - check if anything is attached to the bridge output
1210 * @bridge: bridge control structure
1211 *
1212 * If the bridge supports output detection, as reported by the
1213 * DRM_BRIDGE_OP_DETECT bridge ops flag, call &drm_bridge_funcs.detect for the
1214 * bridge and return the connection status. Otherwise return
1215 * connector_status_unknown.
1216 *
1217 * RETURNS:
1218 * The detection status on success, or connector_status_unknown if the bridge
1219 * doesn't support output detection.
1220 */
1221enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge)
1222{
1223 if (!(bridge->ops & DRM_BRIDGE_OP_DETECT))
1224 return connector_status_unknown;
1225
1226 return bridge->funcs->detect(bridge);
1227}
1228EXPORT_SYMBOL_GPL(drm_bridge_detect);
1229
1230/**
1231 * drm_bridge_get_modes - fill all modes currently valid for the sink into the
1232 * @connector
1233 * @bridge: bridge control structure
1234 * @connector: the connector to fill with modes
1235 *
1236 * If the bridge supports output modes retrieval, as reported by the
1237 * DRM_BRIDGE_OP_MODES bridge ops flag, call &drm_bridge_funcs.get_modes to
1238 * fill the connector with all valid modes and return the number of modes
1239 * added. Otherwise return 0.
1240 *
1241 * RETURNS:
1242 * The number of modes added to the connector.
1243 */
1244int drm_bridge_get_modes(struct drm_bridge *bridge,
1245 struct drm_connector *connector)
1246{
1247 if (!(bridge->ops & DRM_BRIDGE_OP_MODES))
1248 return 0;
1249
1250 return bridge->funcs->get_modes(bridge, connector);
1251}
1252EXPORT_SYMBOL_GPL(drm_bridge_get_modes);
1253
1254/**
1255 * drm_bridge_edid_read - read the EDID data of the connected display
1256 * @bridge: bridge control structure
1257 * @connector: the connector to read EDID for
1258 *
1259 * If the bridge supports output EDID retrieval, as reported by the
1260 * DRM_BRIDGE_OP_EDID bridge ops flag, call &drm_bridge_funcs.edid_read to get
1261 * the EDID and return it. Otherwise return NULL.
1262 *
1263 * RETURNS:
1264 * The retrieved EDID on success, or NULL otherwise.
1265 */
1266const struct drm_edid *drm_bridge_edid_read(struct drm_bridge *bridge,
1267 struct drm_connector *connector)
1268{
1269 if (!(bridge->ops & DRM_BRIDGE_OP_EDID))
1270 return NULL;
1271
1272 return bridge->funcs->edid_read(bridge, connector);
1273}
1274EXPORT_SYMBOL_GPL(drm_bridge_edid_read);
1275
1276/**
1277 * drm_bridge_hpd_enable - enable hot plug detection for the bridge
1278 * @bridge: bridge control structure
1279 * @cb: hot-plug detection callback
1280 * @data: data to be passed to the hot-plug detection callback
1281 *
1282 * Call &drm_bridge_funcs.hpd_enable if implemented and register the given @cb
1283 * and @data as hot plug notification callback. From now on the @cb will be
1284 * called with @data when an output status change is detected by the bridge,
1285 * until hot plug notification gets disabled with drm_bridge_hpd_disable().
1286 *
1287 * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in
1288 * bridge->ops. This function shall not be called when the flag is not set.
1289 *
1290 * Only one hot plug detection callback can be registered at a time, it is an
1291 * error to call this function when hot plug detection is already enabled for
1292 * the bridge.
1293 */
1294void drm_bridge_hpd_enable(struct drm_bridge *bridge,
1295 void (*cb)(void *data,
1296 enum drm_connector_status status),
1297 void *data)
1298{
1299 if (!(bridge->ops & DRM_BRIDGE_OP_HPD))
1300 return;
1301
1302 mutex_lock(&bridge->hpd_mutex);
1303
1304 if (WARN(bridge->hpd_cb, "Hot plug detection already enabled\n"))
1305 goto unlock;
1306
1307 bridge->hpd_cb = cb;
1308 bridge->hpd_data = data;
1309
1310 if (bridge->funcs->hpd_enable)
1311 bridge->funcs->hpd_enable(bridge);
1312
1313unlock:
1314 mutex_unlock(&bridge->hpd_mutex);
1315}
1316EXPORT_SYMBOL_GPL(drm_bridge_hpd_enable);
1317
1318/**
1319 * drm_bridge_hpd_disable - disable hot plug detection for the bridge
1320 * @bridge: bridge control structure
1321 *
1322 * Call &drm_bridge_funcs.hpd_disable if implemented and unregister the hot
1323 * plug detection callback previously registered with drm_bridge_hpd_enable().
1324 * Once this function returns the callback will not be called by the bridge
1325 * when an output status change occurs.
1326 *
1327 * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in
1328 * bridge->ops. This function shall not be called when the flag is not set.
1329 */
1330void drm_bridge_hpd_disable(struct drm_bridge *bridge)
1331{
1332 if (!(bridge->ops & DRM_BRIDGE_OP_HPD))
1333 return;
1334
1335 mutex_lock(&bridge->hpd_mutex);
1336 if (bridge->funcs->hpd_disable)
1337 bridge->funcs->hpd_disable(bridge);
1338
1339 bridge->hpd_cb = NULL;
1340 bridge->hpd_data = NULL;
1341 mutex_unlock(&bridge->hpd_mutex);
1342}
1343EXPORT_SYMBOL_GPL(drm_bridge_hpd_disable);
1344
1345/**
1346 * drm_bridge_hpd_notify - notify hot plug detection events
1347 * @bridge: bridge control structure
1348 * @status: output connection status
1349 *
1350 * Bridge drivers shall call this function to report hot plug events when they
1351 * detect a change in the output status, when hot plug detection has been
1352 * enabled by drm_bridge_hpd_enable().
1353 *
1354 * This function shall be called in a context that can sleep.
1355 */
1356void drm_bridge_hpd_notify(struct drm_bridge *bridge,
1357 enum drm_connector_status status)
1358{
1359 mutex_lock(&bridge->hpd_mutex);
1360 if (bridge->hpd_cb)
1361 bridge->hpd_cb(bridge->hpd_data, status);
1362 mutex_unlock(&bridge->hpd_mutex);
1363}
1364EXPORT_SYMBOL_GPL(drm_bridge_hpd_notify);
1365
1366#ifdef CONFIG_OF
1367/**
1368 * of_drm_find_bridge - find the bridge corresponding to the device node in
1369 * the global bridge list
1370 *
1371 * @np: device node
1372 *
1373 * RETURNS:
1374 * drm_bridge control struct on success, NULL on failure
1375 */
1376struct drm_bridge *of_drm_find_bridge(struct device_node *np)
1377{
1378 struct drm_bridge *bridge;
1379
1380 mutex_lock(&bridge_lock);
1381
1382 list_for_each_entry(bridge, &bridge_list, list) {
1383 if (bridge->of_node == np) {
1384 mutex_unlock(&bridge_lock);
1385 return bridge;
1386 }
1387 }
1388
1389 mutex_unlock(&bridge_lock);
1390 return NULL;
1391}
1392EXPORT_SYMBOL(of_drm_find_bridge);
1393#endif
1394
1395static void drm_bridge_debugfs_show_bridge(struct drm_printer *p,
1396 struct drm_bridge *bridge,
1397 unsigned int idx)
1398{
1399 drm_printf(p, "bridge[%u]: %ps\n", idx, bridge->funcs);
1400 drm_printf(p, "\ttype: [%d] %s\n",
1401 bridge->type,
1402 drm_get_connector_type_name(bridge->type));
1403
1404 if (bridge->of_node)
1405 drm_printf(p, "\tOF: %pOFfc\n", bridge->of_node);
1406
1407 drm_printf(p, "\tops: [0x%x]", bridge->ops);
1408 if (bridge->ops & DRM_BRIDGE_OP_DETECT)
1409 drm_puts(p, " detect");
1410 if (bridge->ops & DRM_BRIDGE_OP_EDID)
1411 drm_puts(p, " edid");
1412 if (bridge->ops & DRM_BRIDGE_OP_HPD)
1413 drm_puts(p, " hpd");
1414 if (bridge->ops & DRM_BRIDGE_OP_MODES)
1415 drm_puts(p, " modes");
1416 if (bridge->ops & DRM_BRIDGE_OP_HDMI)
1417 drm_puts(p, " hdmi");
1418 drm_puts(p, "\n");
1419}
1420
1421static int allbridges_show(struct seq_file *m, void *data)
1422{
1423 struct drm_printer p = drm_seq_file_printer(m);
1424 struct drm_bridge *bridge;
1425 unsigned int idx = 0;
1426
1427 mutex_lock(&bridge_lock);
1428
1429 list_for_each_entry(bridge, &bridge_list, list)
1430 drm_bridge_debugfs_show_bridge(&p, bridge, idx++);
1431
1432 mutex_unlock(&bridge_lock);
1433
1434 return 0;
1435}
1436DEFINE_SHOW_ATTRIBUTE(allbridges);
1437
1438static int encoder_bridges_show(struct seq_file *m, void *data)
1439{
1440 struct drm_encoder *encoder = m->private;
1441 struct drm_printer p = drm_seq_file_printer(m);
1442 struct drm_bridge *bridge;
1443 unsigned int idx = 0;
1444
1445 drm_for_each_bridge_in_chain(encoder, bridge)
1446 drm_bridge_debugfs_show_bridge(&p, bridge, idx++);
1447
1448 return 0;
1449}
1450DEFINE_SHOW_ATTRIBUTE(encoder_bridges);
1451
/* Create the top-level "bridges" debugfs file listing all bridges. */
void drm_bridge_debugfs_params(struct dentry *root)
{
	debugfs_create_file("bridges", 0444, root, NULL, &allbridges_fops);
}
1456
/* Create the per-encoder "bridges" debugfs file under @root. */
void drm_bridge_debugfs_encoder_params(struct dentry *root,
				       struct drm_encoder *encoder)
{
	/* bridges list */
	debugfs_create_file("bridges", 0444, root, encoder, &encoder_bridges_fops);
}
1463
1464MODULE_AUTHOR("Ajay Kumar <ajaykumar.rs@samsung.com>");
1465MODULE_DESCRIPTION("DRM bridge infrastructure");
1466MODULE_LICENSE("GPL and additional rights");