/*
 * Copyright © 2014 Red Hat
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission. The copyright holders make no representations
 * about the suitability of this software for any purpose. It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/seq_file.h>

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stacktrace.h>
#include <linux/sort.h>
#include <linux/timekeeping.h>
#include <linux/math64.h>
#endif

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include "drm_crtc_helper_internal.h"
#include "drm_dp_mst_topology_internal.h"

/**
 * DOC: dp mst helper
 *
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband msgs.
 */
struct drm_dp_pending_up_req {
	struct drm_dp_sideband_msg_hdr hdr;
	struct drm_dp_sideband_msg_req_body msg;
	struct list_head next;
};

static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);

static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id,
				     struct drm_dp_payload *payload);

static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int offset, int size, u8 *bytes);
static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb);

static void
drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_branch *mstb);

static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);

#define DBG_PREFIX "[dp_mst]"

#define DP_STR(x) [DP_ ## x] = #x

static const char *drm_dp_mst_req_type_str(u8 req_type)
{
	static const char * const req_type_str[] = {
		DP_STR(GET_MSG_TRANSACTION_VERSION),
		DP_STR(LINK_ADDRESS),
		DP_STR(CONNECTION_STATUS_NOTIFY),
		DP_STR(ENUM_PATH_RESOURCES),
		DP_STR(ALLOCATE_PAYLOAD),
		DP_STR(QUERY_PAYLOAD),
		DP_STR(RESOURCE_STATUS_NOTIFY),
		DP_STR(CLEAR_PAYLOAD_ID_TABLE),
		DP_STR(REMOTE_DPCD_READ),
		DP_STR(REMOTE_DPCD_WRITE),
		DP_STR(REMOTE_I2C_READ),
		DP_STR(REMOTE_I2C_WRITE),
		DP_STR(POWER_UP_PHY),
		DP_STR(POWER_DOWN_PHY),
		DP_STR(SINK_EVENT_NOTIFY),
		DP_STR(QUERY_STREAM_ENC_STATUS),
	};

	if (req_type >= ARRAY_SIZE(req_type_str) ||
	    !req_type_str[req_type])
		return "unknown";

	return req_type_str[req_type];
}

#undef DP_STR
#define DP_STR(x) [DP_NAK_ ## x] = #x

static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
{
	static const char * const nak_reason_str[] = {
		DP_STR(WRITE_FAILURE),
		DP_STR(INVALID_READ),
		DP_STR(CRC_FAILURE),
		DP_STR(BAD_PARAM),
		DP_STR(DEFER),
		DP_STR(LINK_FAILURE),
		DP_STR(NO_RESOURCES),
		DP_STR(DPCD_FAIL),
		DP_STR(I2C_NAK),
		DP_STR(ALLOCATE_FAIL),
	};

	if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
	    !nak_reason_str[nak_reason])
		return "unknown";

	return nak_reason_str[nak_reason];
}

#undef DP_STR
#define DP_STR(x) [DRM_DP_SIDEBAND_TX_ ## x] = #x

static const char *drm_dp_mst_sideband_tx_state_str(int state)
{
	static const char * const sideband_reason_str[] = {
		DP_STR(QUEUED),
		DP_STR(START_SEND),
		DP_STR(SENT),
		DP_STR(RX),
		DP_STR(TIMEOUT),
	};

	if (state >= ARRAY_SIZE(sideband_reason_str) ||
	    !sideband_reason_str[state])
		return "unknown";

	return sideband_reason_str[state];
}

static int
drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
{
	int i;
	u8 unpacked_rad[16];

	for (i = 0; i < lct; i++) {
		if (i % 2)
			unpacked_rad[i] = rad[i / 2] >> 4;
		else
			unpacked_rad[i] = rad[i / 2] & 0xf; /* low nibble; BIT_MASK(4) would keep only bit 4 */
	}

	/* TODO: Eventually add something to printk so we can format the rad
	 * like this: 1.2.3
	 */
	return snprintf(out, len, "%*phC", lct, unpacked_rad);
}
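
/*
 * Worked example of the unpacking above (editor's sketch with made-up
 * values): for lct = 2 and rad = { 0x12 }, iteration 0 takes the low nibble
 * (0x2) and iteration 1 the high nibble (0x1), so unpacked_rad = { 0x02,
 * 0x01 } and the %*phC format writes "02:01" into @out.
 */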

/* sideband msg handling */
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = num_nibbles * 4;
	u8 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x10) == 0x10)
			remainder ^= 0x13;
	}

	number_of_bits = 4;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x10) != 0)
			remainder ^= 0x13;
	}

	return remainder;
}
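
/*
 * The two loops above are a bitwise long division by the sideband header
 * CRC-4 polynomial x^4 + x + 1 (0b10011, hence the XOR with 0x13 whenever
 * bit 4 of the remainder becomes set): the first loop shifts in the
 * num_nibbles * 4 message bits MSB-first, the second flushes the remainder
 * with four zero bits. Equivalent sketch (editor's illustration only, not
 * used by the driver):
 *
 *	u8 crc = 0;
 *	for_each_msg_bit_then_4_zero_bits(b) {
 *		crc = (crc << 1) | b;
 *		if (crc & 0x10)
 *			crc ^= 0x13;
 *	}
 *	return crc;	// the 4-bit remainder
 */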

static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = number_of_bytes * 8;
	u16 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x100) == 0x100)
			remainder ^= 0xd5;
	}

	number_of_bits = 8;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x100) != 0)
			remainder ^= 0xd5;
	}

	return remainder & 0xff;
}
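
/*
 * Same long-division scheme as drm_dp_msg_header_crc4(), but over whole
 * bytes with the sideband data CRC-8 polynomial (0xd5 plus the implicit
 * x^8 term) and flushed with eight zero bits instead of four; only the low
 * byte of the u16 working remainder is meaningful, hence the final mask.
 */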

static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
	u8 size = 3;

	size += (hdr->lct / 2);
	return size;
}

static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int *len)
{
	int idx = 0;
	int i;
	u8 crc4;

	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
		(hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);

	*len = idx;
}
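
/*
 * Byte layout produced above (editor's sketch for a unicast header with
 * lct = 1, lcr = 0, msg_len = 2, seqno = 0, somt = eomt = 1):
 *
 *	buf[0] = 0x10		// LCT in the high nibble, LCR in the low
 *	buf[1] = 0x02		// broadcast/path_msg flags + 6-bit msg_len
 *	buf[2] = 0xc0 | crc4	// SOMT, EOMT, seqno, 4-bit header CRC
 *
 * With lct = 1 no RAD bytes are emitted, so *len comes back as 3.
 */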

static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int buflen, u8 *hdrlen)
{
	u8 crc4;
	u8 len;
	int i;
	u8 idx;

	if (buf[0] == 0)
		return false;
	len = 3;
	len += ((buf[0] & 0xf0) >> 4) / 2;
	if (len > buflen)
		return false;
	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
		return false;
	}

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	idx = 1;
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	idx++;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;
	idx++;
	*hdrlen = idx;
	return true;
}

void
drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
			   struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	int i;
	u8 *buf = raw->msg;

	buf[idx++] = req->req_type & 0x7f;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	case DP_ALLOCATE_PAYLOAD:
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
			(req->u.allocate_payload.number_sdp_streams & 0xf);
		idx++;
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
		idx++;
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
			idx++;
		}
		if (req->u.allocate_payload.number_sdp_streams & 1) {
			i = req->u.allocate_payload.number_sdp_streams - 1;
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
			idx++;
		}
		break;
	case DP_QUERY_PAYLOAD:
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
		idx++;
		break;
	case DP_REMOTE_DPCD_READ:
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_read.num_bytes);
		idx++;
		break;

	case DP_REMOTE_DPCD_WRITE:
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
		idx += req->u.dpcd_write.num_bytes;
		break;
	case DP_REMOTE_I2C_READ:
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
		idx++;
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
			idx++;
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
			idx++;
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
			idx += req->u.i2c_read.transactions[i].num_bytes;

			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
			idx++;
		}
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_read.num_bytes_read);
		idx++;
		break;

	case DP_REMOTE_I2C_WRITE:
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
		idx += req->u.i2c_write.num_bytes;
		break;
	}
	raw->cur_len = idx;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_encode_sideband_req);
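
/*
 * Example of driving the encoder above (editor's sketch; this mirrors what
 * drm_dp_send_dpcd_read() builds internally):
 *
 *	struct drm_dp_sideband_msg_req_body req = {
 *		.req_type = DP_REMOTE_DPCD_READ,
 *	};
 *
 *	req.u.dpcd_read.port_number = port->port_num;
 *	req.u.dpcd_read.dpcd_address = DP_DPCD_REV;
 *	req.u.dpcd_read.num_bytes = 1;
 *	drm_dp_encode_sideband_req(&req, txmsg);
 *
 * txmsg->msg then holds five bytes: the request opcode, the port/address
 * nibbles, the remaining two address bytes, and the read length.
 */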

/* Decode a sideband request we've encoded, mainly used for debugging */
int
drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
			   struct drm_dp_sideband_msg_req_body *req)
{
	const u8 *buf = raw->msg;
	int i, idx = 0;

	req->req_type = buf[idx++] & 0x7f;
	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		req->u.port_num.port_number = (buf[idx] >> 4) & 0xf;
		break;
	case DP_ALLOCATE_PAYLOAD:
		{
			struct drm_dp_allocate_payload *a =
				&req->u.allocate_payload;

			a->number_sdp_streams = buf[idx] & 0xf;
			a->port_number = (buf[idx] >> 4) & 0xf;

			WARN_ON(buf[++idx] & 0x80);
			a->vcpi = buf[idx] & 0x7f;

			a->pbn = buf[++idx] << 8;
			a->pbn |= buf[++idx];

			idx++;
			for (i = 0; i < a->number_sdp_streams; i++) {
				a->sdp_stream_sink[i] =
					(buf[idx + (i / 2)] >> ((i % 2) ? 0 : 4)) & 0xf;
			}
		}
		break;
	case DP_QUERY_PAYLOAD:
		req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf;
		WARN_ON(buf[++idx] & 0x80);
		req->u.query_payload.vcpi = buf[idx] & 0x7f;
		break;
	case DP_REMOTE_DPCD_READ:
		{
			struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read;

			r->port_number = (buf[idx] >> 4) & 0xf;

			r->dpcd_address = (buf[idx] << 16) & 0xf0000;
			r->dpcd_address |= (buf[++idx] << 8) & 0xff00;
			r->dpcd_address |= buf[++idx] & 0xff;

			r->num_bytes = buf[++idx];
		}
		break;
	case DP_REMOTE_DPCD_WRITE:
		{
			struct drm_dp_remote_dpcd_write *w =
				&req->u.dpcd_write;

			w->port_number = (buf[idx] >> 4) & 0xf;

			w->dpcd_address = (buf[idx] << 16) & 0xf0000;
			w->dpcd_address |= (buf[++idx] << 8) & 0xff00;
			w->dpcd_address |= buf[++idx] & 0xff;

			w->num_bytes = buf[++idx];

			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
					   GFP_KERNEL);
			if (!w->bytes)
				return -ENOMEM;
		}
		break;
	case DP_REMOTE_I2C_READ:
		{
			struct drm_dp_remote_i2c_read *r = &req->u.i2c_read;
			struct drm_dp_remote_i2c_read_tx *tx;
			bool failed = false;

			r->num_transactions = buf[idx] & 0x3;
			r->port_number = (buf[idx] >> 4) & 0xf;
			for (i = 0; i < r->num_transactions; i++) {
				tx = &r->transactions[i];

				tx->i2c_dev_id = buf[++idx] & 0x7f;
				tx->num_bytes = buf[++idx];
				tx->bytes = kmemdup(&buf[++idx],
						    tx->num_bytes,
						    GFP_KERNEL);
				if (!tx->bytes) {
					failed = true;
					break;
				}
				idx += tx->num_bytes;
				tx->no_stop_bit = (buf[idx] >> 4) & 0x1; /* bit 4, matching the encoder above */
				tx->i2c_transaction_delay = buf[idx] & 0xf;
			}

			if (failed) {
				for (i = 0; i < r->num_transactions; i++) {
					tx = &r->transactions[i];
					kfree(tx->bytes);
				}
				return -ENOMEM;
			}

			r->read_i2c_device_id = buf[++idx] & 0x7f;
			r->num_bytes_read = buf[++idx];
		}
		break;
	case DP_REMOTE_I2C_WRITE:
		{
			struct drm_dp_remote_i2c_write *w = &req->u.i2c_write;

			w->port_number = (buf[idx] >> 4) & 0xf;
			w->write_i2c_device_id = buf[++idx] & 0x7f;
			w->num_bytes = buf[++idx];
			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
					   GFP_KERNEL);
			if (!w->bytes)
				return -ENOMEM;
		}
		break;
	}

	return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_decode_sideband_req);
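
/*
 * Note that a successful decode of DP_REMOTE_DPCD_WRITE, DP_REMOTE_I2C_READ
 * or DP_REMOTE_I2C_WRITE hands the caller kmemdup()'d byte buffers: whoever
 * calls drm_dp_decode_sideband_req() owns them and must kfree() each one,
 * exactly as drm_dp_mst_dump_sideband_msg_tx() below does after dumping.
 */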

void
drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
				  int indent, struct drm_printer *printer)
{
	int i;

#define P(f, ...) drm_printf_indent(printer, indent, f, ##__VA_ARGS__)
	if (req->req_type == DP_LINK_ADDRESS) {
		/* No contents to print */
		P("type=%s\n", drm_dp_mst_req_type_str(req->req_type));
		return;
	}

	P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type));
	indent++;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		P("port=%d\n", req->u.port_num.port_number);
		break;
	case DP_ALLOCATE_PAYLOAD:
		P("port=%d vcpi=%d pbn=%d sdp_streams=%d %*ph\n",
		  req->u.allocate_payload.port_number,
		  req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn,
		  req->u.allocate_payload.number_sdp_streams,
		  req->u.allocate_payload.number_sdp_streams,
		  req->u.allocate_payload.sdp_stream_sink);
		break;
	case DP_QUERY_PAYLOAD:
		P("port=%d vcpi=%d\n",
		  req->u.query_payload.port_number,
		  req->u.query_payload.vcpi);
		break;
	case DP_REMOTE_DPCD_READ:
		P("port=%d dpcd_addr=%05x len=%d\n",
		  req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address,
		  req->u.dpcd_read.num_bytes);
		break;
	case DP_REMOTE_DPCD_WRITE:
		P("port=%d addr=%05x len=%d: %*ph\n",
		  req->u.dpcd_write.port_number,
		  req->u.dpcd_write.dpcd_address,
		  req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes,
		  req->u.dpcd_write.bytes);
		break;
	case DP_REMOTE_I2C_READ:
		P("port=%d num_tx=%d id=%d size=%d:\n",
		  req->u.i2c_read.port_number,
		  req->u.i2c_read.num_transactions,
		  req->u.i2c_read.read_i2c_device_id,
		  req->u.i2c_read.num_bytes_read);

		indent++;
		for (i = 0; i < req->u.i2c_read.num_transactions; i++) {
			const struct drm_dp_remote_i2c_read_tx *rtx =
				&req->u.i2c_read.transactions[i];

			P("%d: id=%03d size=%03d no_stop_bit=%d tx_delay=%03d: %*ph\n",
			  i, rtx->i2c_dev_id, rtx->num_bytes,
			  rtx->no_stop_bit, rtx->i2c_transaction_delay,
			  rtx->num_bytes, rtx->bytes);
		}
		break;
	case DP_REMOTE_I2C_WRITE:
		P("port=%d id=%d size=%d: %*ph\n",
		  req->u.i2c_write.port_number,
		  req->u.i2c_write.write_i2c_device_id,
		  req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
		  req->u.i2c_write.bytes);
		break;
	default:
		P("???\n");
		break;
	}
#undef P
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_dump_sideband_msg_req_body);

static inline void
drm_dp_mst_dump_sideband_msg_tx(struct drm_printer *p,
				const struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_sideband_msg_req_body req;
	char buf[64];
	int ret;
	int i;

	drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf,
			      sizeof(buf));
	drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s\n",
		   txmsg->cur_offset, txmsg->cur_len, txmsg->seqno,
		   drm_dp_mst_sideband_tx_state_str(txmsg->state),
		   txmsg->path_msg, buf);

	ret = drm_dp_decode_sideband_req(txmsg, &req);
	if (ret) {
		drm_printf(p, "<failed to decode sideband req: %d>\n", ret);
		return;
	}
	drm_dp_dump_sideband_msg_req_body(&req, 1, p);

	switch (req.req_type) {
	case DP_REMOTE_DPCD_WRITE:
		kfree(req.u.dpcd_write.bytes);
		break;
	case DP_REMOTE_I2C_READ:
		for (i = 0; i < req.u.i2c_read.num_transactions; i++)
			kfree(req.u.i2c_read.transactions[i].bytes);
		break;
	case DP_REMOTE_I2C_WRITE:
		kfree(req.u.i2c_write.bytes);
		break;
	}
}

static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
{
	u8 crc4;

	crc4 = drm_dp_msg_data_crc4(msg, len);
	msg[len] = crc4;
}

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
					 struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	u8 *buf = raw->msg;

	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);

	raw->cur_len = idx;
}

/* this adds a chunk of msg to the builder to get the final msg */
static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
				      u8 *replybuf, u8 replybuflen, bool hdr)
{
	int ret;
	u8 crc4;

	if (hdr) {
		u8 hdrlen;
		struct drm_dp_sideband_msg_hdr recv_hdr;

		ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
		if (!ret) {
			print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
			return false;
		}

		/*
		 * ignore out-of-order messages or messages that are part of a
		 * failed transaction
		 */
		if (!recv_hdr.somt && !msg->have_somt)
			return false;

		/* get length contained in this portion */
		msg->curchunk_len = recv_hdr.msg_len;
		msg->curchunk_hdrlen = hdrlen;

		/* we have already gotten an somt - don't bother parsing */
		if (recv_hdr.somt && msg->have_somt)
			return false;

		if (recv_hdr.somt) {
			memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
			msg->have_somt = true;
		}
		if (recv_hdr.eomt)
			msg->have_eomt = true;

		/* copy the bytes for the remainder of this header chunk */
		msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
		memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
	} else {
		memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
		msg->curchunk_idx += replybuflen;
	}

	if (msg->curchunk_idx >= msg->curchunk_len) {
		/* do CRC */
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		if (crc4 != msg->chunk[msg->curchunk_len - 1])
			print_hex_dump(KERN_DEBUG, "wrong crc",
				       DUMP_PREFIX_NONE, 16, 1,
				       msg->chunk, msg->curchunk_len, false);
		/* copy chunk into bigger msg */
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
	}
	return true;
}
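
/*
 * Reassembly flow (editor's sketch): a reply that spans multiple DPCD reads
 * is fed in as one header-bearing chunk followed by raw continuation bytes:
 *
 *	drm_dp_sideband_msg_build(msg, rx_buf, len, true);	// parses hdr,
 *								// copies body
 *	drm_dp_sideband_msg_build(msg, rx_buf2, len2, false);	// body only
 *
 * Once curchunk_idx reaches curchunk_len, the per-chunk CRC byte is checked
 * and the chunk (minus that CRC) is appended to msg->msg; have_eomt then
 * tells the caller whether the full transaction has arrived.
 */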

static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	int i;

	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
	idx += 16;
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
		if (raw->msg[idx] & 0x80)
			repmsg->u.link_addr.ports[i].input_port = 1;

		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);

		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
		if (repmsg->u.link_addr.ports[i].input_port == 0)
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
			idx++;
			if (idx > raw->curlen)
				goto fail_len;
			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
			idx += 16;
			if (idx > raw->curlen)
				goto fail_len;
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
			idx++;
		}
		if (idx > raw->curlen)
			goto fail_len;
	}

	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
						   struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote dpcd read reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
	idx++;
	/* TODO check */
	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
	repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen) {
		DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
			      idx, raw->curlen);
		return false;
	}
	return true;
}

static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
					struct drm_dp_sideband_msg_reply_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
	msg->req_type = (raw->msg[0] & 0x7f);

	if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
		msg->u.nak.reason = raw->msg[17];
		msg->u.nak.nak_data = raw->msg[18];
		return false;
	}

	switch (msg->req_type) {
	case DP_LINK_ADDRESS:
		return drm_dp_sideband_parse_link_address(raw, msg);
	case DP_QUERY_PAYLOAD:
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
	case DP_REMOTE_DPCD_READ:
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
	case DP_REMOTE_DPCD_WRITE:
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
	case DP_REMOTE_I2C_READ:
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
	case DP_ENUM_PATH_RESOURCES:
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
	case DP_ALLOCATE_PAYLOAD:
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
	case DP_CLEAR_PAYLOAD_ID_TABLE:
		return true; /* since there's nothing to parse */
	default:
		DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
			  drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
							   struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
							 struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
				      struct drm_dp_sideband_msg_req_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->req_type = (raw->msg[0] & 0x7f);

	switch (msg->req_type) {
	case DP_CONNECTION_STATUS_NOTIFY:
		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
	case DP_RESOURCE_STATUS_NOTIFY:
		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
	default:
		DRM_ERROR("Got unknown request 0x%02x (%s)\n", msg->req_type,
			  drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static void build_dpcd_write(struct drm_dp_sideband_msg_tx *msg,
			     u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_WRITE;
	req.u.dpcd_write.port_number = port_num;
	req.u.dpcd_write.dpcd_address = offset;
	req.u.dpcd_write.num_bytes = num_bytes;
	req.u.dpcd_write.bytes = bytes;
	drm_dp_encode_sideband_req(&req, msg);
}

static void build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_LINK_ADDRESS;
	drm_dp_encode_sideband_req(&req, msg);
}

static int build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
	drm_dp_encode_sideband_req(&req, msg);
	return 0;
}

static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
				     int port_num)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_ENUM_PATH_RESOURCES;
	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static void build_allocate_payload(struct drm_dp_sideband_msg_tx *msg,
				   int port_num,
				   u8 vcpi, uint16_t pbn,
				   u8 number_sdp_streams,
				   u8 *sdp_stream_sink)
{
	struct drm_dp_sideband_msg_req_body req;

	memset(&req, 0, sizeof(req));
	req.req_type = DP_ALLOCATE_PAYLOAD;
	req.u.allocate_payload.port_number = port_num;
	req.u.allocate_payload.vcpi = vcpi;
	req.u.allocate_payload.pbn = pbn;
	req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
	memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
	       number_sdp_streams);
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
}

static void build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
				   int port_num, bool power_up)
{
	struct drm_dp_sideband_msg_req_body req;

	if (power_up)
		req.req_type = DP_POWER_UP_PHY;
	else
		req.req_type = DP_POWER_DOWN_PHY;

	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
}

static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_vcpi *vcpi)
{
	int ret, vcpi_ret;

	mutex_lock(&mgr->payload_lock);
	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
	if (ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of payload ids %d\n", ret);
		goto out_unlock;
	}

	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
	if (vcpi_ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
		goto out_unlock;
	}

	set_bit(ret, &mgr->payload_mask);
	set_bit(vcpi_ret, &mgr->vcpi_mask);
	vcpi->vcpi = vcpi_ret + 1;
	mgr->proposed_vcpis[ret - 1] = vcpi;
out_unlock:
	mutex_unlock(&mgr->payload_lock);
	return ret;
}
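
/*
 * Example walk-through (editor's sketch, max_payloads = 3): with
 * payload_mask = 0x1 (bit 0 is reserved when the manager is initialized)
 * and vcpi_mask = 0x0, the first caller gets ret = 1 and vcpi_ret = 0, so
 * vcpi->vcpi = 1 and proposed_vcpis[0] points at it. The next caller sees
 * payload_mask = 0x3 and is handed payload id 2 and vcpi 2, and so on until
 * either mask runs out of zero bits and -EINVAL is returned.
 */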

static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
				      int vcpi)
{
	int i;

	if (vcpi == 0)
		return;

	mutex_lock(&mgr->payload_lock);
	DRM_DEBUG_KMS("putting payload %d\n", vcpi);
	clear_bit(vcpi - 1, &mgr->vcpi_mask);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i] &&
		    mgr->proposed_vcpis[i]->vcpi == vcpi) {
			mgr->proposed_vcpis[i] = NULL;
			clear_bit(i + 1, &mgr->payload_mask);
		}
	}
	mutex_unlock(&mgr->payload_lock);
}

static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
{
	unsigned int state;

	/*
	 * All updates to txmsg->state are protected by mgr->qlock, and the two
	 * cases we check here are terminal states. For those the barriers
	 * provided by the wake_up/wait_event pair are enough.
	 */
	state = READ_ONCE(txmsg->state);
	return (state == DRM_DP_SIDEBAND_TX_RX ||
		state == DRM_DP_SIDEBAND_TX_TIMEOUT);
}

static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
				    struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	int ret;

	ret = wait_event_timeout(mgr->tx_waitq,
				 check_txmsg_state(mgr, txmsg),
				 (4 * HZ));
	mutex_lock(&mstb->mgr->qlock);
	if (ret > 0) {
		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
			ret = -EIO;
			goto out;
		}
	} else {
		DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);

		/* dump some state */
		ret = -EIO;

		/* remove from q */
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND)
			list_del(&txmsg->next);

		if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
			mstb->tx_slots[txmsg->seqno] = NULL;

		mgr->is_waiting_for_dwn_reply = false;
	}
out:
	if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
		struct drm_printer p = drm_debug_printer(DBG_PREFIX);

		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
	}
	mutex_unlock(&mgr->qlock);

	drm_dp_mst_kick_tx(mgr);
	return ret;
}

static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;

	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
	if (!mstb)
		return NULL;

	mstb->lct = lct;
	if (lct > 1)
		memcpy(mstb->rad, rad, lct / 2);
	INIT_LIST_HEAD(&mstb->ports);
	kref_init(&mstb->topology_kref);
	kref_init(&mstb->malloc_kref);
	return mstb;
}

static void drm_dp_free_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, malloc_kref);

	if (mstb->port_parent)
		drm_dp_mst_put_port_malloc(mstb->port_parent);

	kfree(mstb);
}

/**
 * DOC: Branch device and port refcounting
 *
 * Topology refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The refcounting schemes for &struct drm_dp_mst_branch and &struct
 * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have
 * two different kinds of refcounts: topology refcounts, and malloc refcounts.
 *
 * Topology refcounts are not exposed to drivers, and are handled internally
 * by the DP MST helpers. The helpers use them in order to prevent the
 * in-memory topology state from being changed in the middle of critical
 * operations like changing the internal state of payload allocations. This
 * means each branch and port will be considered to be connected to the rest
 * of the topology until its topology refcount reaches zero. Additionally,
 * for ports this means that their associated &struct drm_connector will stay
 * registered with userspace until the port's refcount reaches 0.
 *
 * Malloc refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Malloc references are used to keep a &struct drm_dp_mst_port or &struct
 * drm_dp_mst_branch allocated even after all of its topology references have
 * been dropped, so that the driver or MST helpers can safely access each
 * branch's last known state before it was disconnected from the topology.
 * When the malloc refcount of a port or branch reaches 0, the memory
 * allocation containing the &struct drm_dp_mst_branch or &struct
 * drm_dp_mst_port respectively will be freed.
 *
 * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed
 * to drivers. As of writing this documentation, there are no drivers that
 * have a usecase for accessing &struct drm_dp_mst_branch outside of the MST
 * helpers. Exposing this API to drivers in a race-free manner would take more
 * tweaking of the refcounting scheme, however patches are welcome provided
 * there is a legitimate driver usecase for this.
 *
 * Refcount relationships in a topology
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Let's take a look at why the relationship between topology and malloc
 * refcounts is designed the way it is.
 *
 * .. kernel-figure:: dp-mst/topology-figure-1.dot
 *
 *    An example of topology and malloc refs in a DP MST topology with two
 *    active payloads. Topology refcount increments are indicated by solid
 *    lines, and malloc refcount increments are indicated by dashed lines.
 *    Each starts from the branch which incremented the refcount, and ends at
 *    the branch to which the refcount belongs, i.e. the arrow points the
 *    same way as the C pointers used to reference a structure.
 *
 * As you can see in the above figure, every branch increments the topology
 * refcount of its children, and increments the malloc refcount of its
 * parent. Additionally, every payload increments the malloc refcount of its
 * assigned port by 1.
 *
 * So, what would happen if MSTB #3 from the above figure was unplugged from
 * the system, but the driver hadn't yet removed payload #2 from port #3? The
 * topology would start to look like the figure below.
 *
 * .. kernel-figure:: dp-mst/topology-figure-2.dot
 *
 *    Ports and branch devices which have been released from memory are
 *    colored grey, and references which have been removed are colored red.
 *
 * Whenever a port or branch device's topology refcount reaches zero, it will
 * decrement the topology refcounts of all its children, the malloc refcount
 * of its parent, and finally its own malloc refcount. For MSTB #4 and port
 * #4, this means they both have been disconnected from the topology and freed
 * from memory. But, because payload #2 is still holding a reference to port
 * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
 * is still accessible from memory. This also means port #3 has not yet
 * decremented the malloc refcount of MSTB #3, so its &struct
 * drm_dp_mst_branch will also stay allocated in memory until port #3's
 * malloc refcount reaches 0.
 *
 * This relationship is necessary because in order to release payload #2, we
 * need to be able to figure out the last relative of port #3 that's still
 * connected to the topology. In this case, we would travel up the topology as
 * shown below.
 *
 * .. kernel-figure:: dp-mst/topology-figure-3.dot
 *
 * And finally, remove payload #2 by communicating with port #2 through
 * sideband transactions.
 */

/**
 * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_put_mstb_malloc()
 */
static void
drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	kref_get(&mstb->malloc_kref);
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
}

/**
 * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_get_mstb_malloc()
 */
static void
drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
	kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
}

static void drm_dp_free_mst_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, malloc_kref);

	drm_dp_mst_put_mstb_malloc(port->parent);
	kfree(port);
}

/**
 * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
 * reaches 0, the memory allocation for @port will be released and @port may
 * no longer be used.
 *
 * Because @port could potentially be freed at any time by the DP MST helpers
 * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
 * function, drivers that wish to make use of &struct drm_dp_mst_port should
 * ensure that they grab at least one main malloc reference to their MST ports
 * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
 * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
 *
 * See also: drm_dp_mst_put_port_malloc()
 */
void
drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
{
	kref_get(&port->malloc_kref);
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref));
}
EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);

/**
 * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
 * reaches 0, the memory allocation for @port will be released and @port may
 * no longer be used.
 *
 * See also: drm_dp_mst_get_port_malloc()
 */
void
drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
{
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
	kref_put(&port->malloc_kref, drm_dp_free_mst_port);
}
EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
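
/*
 * Typical driver-side pattern for the rule documented above (editor's
 * sketch; my_connector is a hypothetical driver structure):
 *
 *	// in &drm_dp_mst_topology_cbs.add_connector, or anywhere a
 *	// topology reference to the port is still held:
 *	my_connector->port = port;
 *	drm_dp_mst_get_port_malloc(port);
 *
 *	// in the connector's destroy path, once the cached pointer dies:
 *	drm_dp_mst_put_port_malloc(my_connector->port);
 */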

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)

#define STACK_DEPTH 8

static noinline void
__topology_ref_save(struct drm_dp_mst_topology_mgr *mgr,
		    struct drm_dp_mst_topology_ref_history *history,
		    enum drm_dp_mst_topology_ref_type type)
{
	struct drm_dp_mst_topology_ref_entry *entry = NULL;
	depot_stack_handle_t backtrace;
	ulong stack_entries[STACK_DEPTH];
	uint n;
	int i;

	n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1);
	backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL);
	if (!backtrace)
		return;

	/* Try to find an existing entry for this backtrace */
	for (i = 0; i < history->len; i++) {
		if (history->entries[i].backtrace == backtrace) {
			entry = &history->entries[i];
			break;
		}
	}

	/* Otherwise add one */
	if (!entry) {
		struct drm_dp_mst_topology_ref_entry *new;
		int new_len = history->len + 1;

		new = krealloc(history->entries, sizeof(*new) * new_len,
			       GFP_KERNEL);
		if (!new)
			return;

		entry = &new[history->len];
		history->len = new_len;
		history->entries = new;

		entry->backtrace = backtrace;
		entry->type = type;
		entry->count = 0;
	}
	entry->count++;
	entry->ts_nsec = ktime_get_ns();
}

static int
topology_ref_history_cmp(const void *a, const void *b)
{
	const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b;

	if (entry_a->ts_nsec > entry_b->ts_nsec)
		return 1;
	else if (entry_a->ts_nsec < entry_b->ts_nsec)
		return -1;
	else
		return 0;
}

static inline const char *
topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
{
	if (type == DRM_DP_MST_TOPOLOGY_REF_GET)
		return "get";
	else
		return "put";
}

static void
__dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history,
			    void *ptr, const char *type_str)
{
	struct drm_printer p = drm_debug_printer(DBG_PREFIX);
	char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	int i;

	if (!buf)
		return;

	if (!history->len)
		goto out;

	/* First, sort the list so that it goes from oldest to newest
	 * reference entry
	 */
	sort(history->entries, history->len, sizeof(*history->entries),
	     topology_ref_history_cmp, NULL);

	drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n",
		   type_str, ptr);

	for (i = 0; i < history->len; i++) {
		const struct drm_dp_mst_topology_ref_entry *entry =
			&history->entries[i];
		ulong *entries;
		uint nr_entries;
		u64 ts_nsec = entry->ts_nsec;
		u32 rem_nsec = do_div(ts_nsec, 1000000000);

		nr_entries = stack_depot_fetch(entry->backtrace, &entries);
		stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 4);

		drm_printf(&p, "  %d %ss (last at %5llu.%06u):\n%s",
			   entry->count,
			   topology_ref_type_to_str(entry->type),
			   ts_nsec, rem_nsec / 1000, buf);
	}

	/* Now free the history, since this is the only time we expose it */
	kfree(history->entries);
out:
	kfree(buf);
}

static __always_inline void
drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
{
	__dump_topology_ref_history(&mstb->topology_ref_history, mstb,
				    "MSTB");
}

static __always_inline void
drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
{
	__dump_topology_ref_history(&port->topology_ref_history, port,
				    "Port");
}

static __always_inline void
save_mstb_topology_ref(struct drm_dp_mst_branch *mstb,
		       enum drm_dp_mst_topology_ref_type type)
{
	__topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type);
}

static __always_inline void
save_port_topology_ref(struct drm_dp_mst_port *port,
		       enum drm_dp_mst_topology_ref_type type)
{
	__topology_ref_save(port->mgr, &port->topology_ref_history, type);
}

static inline void
topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_lock(&mgr->topology_ref_history_lock);
}

static inline void
topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_unlock(&mgr->topology_ref_history_lock);
}
#else
static inline void
topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {}
static inline void
topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {}
static inline void
drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {}
static inline void
drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
#define save_mstb_topology_ref(mstb, type)
#define save_port_topology_ref(port, type)
#endif

static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, topology_kref);
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;

	drm_dp_mst_dump_mstb_topology_history(mstb);

	INIT_LIST_HEAD(&mstb->destroy_next);

	/*
	 * This can get called under mgr->mutex, so we need to perform the
	 * actual destruction of the mstb in another worker
	 */
	mutex_lock(&mgr->delayed_destroy_lock);
	list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
	mutex_unlock(&mgr->delayed_destroy_lock);
	schedule_work(&mgr->delayed_destroy_work);
}

/**
 * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
 * branch device unless it's zero
 * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Attempts to grab a topology reference to @mstb, if it hasn't yet been
 * removed from the topology (e.g. &drm_dp_mst_branch.topology_kref has
 * reached 0). Holding a topology reference implies that a malloc reference
 * will be held to @mstb as long as the user holds the topology reference.
 *
 * Care should be taken to ensure that the user has at least one malloc
 * reference to @mstb. If you already have a topology reference to @mstb, you
 * should use drm_dp_mst_topology_get_mstb() instead.
 *
 * See also:
 * drm_dp_mst_topology_get_mstb()
 * drm_dp_mst_topology_put_mstb()
 *
 * Returns:
 * * 1: A topology reference was grabbed successfully
 * * 0: @mstb is no longer in the topology, no reference was grabbed
 */
static int __must_check
drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
{
	int ret;

	topology_ref_history_lock(mstb->mgr);
	ret = kref_get_unless_zero(&mstb->topology_kref);
	if (ret) {
		DRM_DEBUG("mstb %p (%d)\n",
			  mstb, kref_read(&mstb->topology_kref));
		save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
	}

	topology_ref_history_unlock(mstb->mgr);

	return ret;
}
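
/*
 * Callers treat the try-get as a combined validity check and refcount bump
 * (editor's sketch):
 *
 *	if (!drm_dp_mst_topology_try_get_mstb(mstb))
 *		return -ENODEV;	// branch already left the topology
 *	... safely message mstb ...
 *	drm_dp_mst_topology_put_mstb(mstb);
 */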
1662
1663/**
1664 * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
1665 * branch device
1666 * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
1667 *
1668 * Increments &drm_dp_mst_branch.topology_refcount without checking whether or
1669 * not it's already reached 0. This is only valid to use in scenarios where
1670 * you are already guaranteed to have at least one active topology reference
1671 * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
1672 *
1673 * See also:
1674 * drm_dp_mst_topology_try_get_mstb()
1675 * drm_dp_mst_topology_put_mstb()
1676 */
1677static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
1678{
1679 topology_ref_history_lock(mstb->mgr);
1680
1681 save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
1682 WARN_ON(kref_read(&mstb->topology_kref) == 0);
1683 kref_get(&mstb->topology_kref);
1684 DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
1685
1686 topology_ref_history_unlock(mstb->mgr);
1687}
1688
1689/**
1690 * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
1691 * device
1692 * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
1693 *
1694 * Releases a topology reference from @mstb by decrementing
1695 * &drm_dp_mst_branch.topology_kref.
1696 *
1697 * See also:
1698 * drm_dp_mst_topology_try_get_mstb()
1699 * drm_dp_mst_topology_get_mstb()
1700 */
1701static void
1702drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
1703{
1704 topology_ref_history_lock(mstb->mgr);
1705
1706 DRM_DEBUG("mstb %p (%d)\n",
1707 mstb, kref_read(&mstb->topology_kref) - 1);
1708 save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT);
1709
1710 topology_ref_history_unlock(mstb->mgr);
1711 kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
1712}
1713
1714static void drm_dp_destroy_port(struct kref *kref)
1715{
1716 struct drm_dp_mst_port *port =
1717 container_of(kref, struct drm_dp_mst_port, topology_kref);
1718 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
1719
1720 drm_dp_mst_dump_port_topology_history(port);
1721
1722 /* There's nothing that needs locking to destroy an input port yet */
1723 if (port->input) {
1724 drm_dp_mst_put_port_malloc(port);
1725 return;
1726 }
1727
1728 kfree(port->cached_edid);
1729
1730 /*
1731 * we can't destroy the connector here, as we might be holding the
1732 * mode_config.mutex from an EDID retrieval
1733 */
1734 mutex_lock(&mgr->delayed_destroy_lock);
1735 list_add(&port->next, &mgr->destroy_port_list);
1736 mutex_unlock(&mgr->delayed_destroy_lock);
1737 schedule_work(&mgr->delayed_destroy_work);
1738}
1739
1740/**
1741 * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
1742 * port unless it's zero
1743 * @port: &struct drm_dp_mst_port to increment the topology refcount of
1744 *
1745 * Attempts to grab a topology reference to @port, if it hasn't yet been
1746 * removed from the topology (e.g. &drm_dp_mst_port.topology_kref has reached
1747 * 0). Holding a topology reference implies that a malloc reference will be
1748 * held to @port as long as the user holds the topology reference.
1749 *
1750 * Care should be taken to ensure that the user has at least one malloc
1751 * reference to @port. If you already have a topology reference to @port, you
1752 * should use drm_dp_mst_topology_get_port() instead.
1753 *
1754 * See also:
1755 * drm_dp_mst_topology_get_port()
1756 * drm_dp_mst_topology_put_port()
1757 *
1758 * Returns:
1759 * * 1: A topology reference was grabbed successfully
1760 * * 0: @port is no longer in the topology, no reference was grabbed
1761 */
1762static int __must_check
1763drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
1764{
1765 int ret;
1766
1767 topology_ref_history_lock(port->mgr);
1768 ret = kref_get_unless_zero(&port->topology_kref);
1769 if (ret) {
1770 DRM_DEBUG("port %p (%d)\n",
1771 port, kref_read(&port->topology_kref));
1772 save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
1773 }
1774
1775 topology_ref_history_unlock(port->mgr);
1776 return ret;
1777}
1778
1779/**
1780 * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
1781 * @port: The &struct drm_dp_mst_port to increment the topology refcount of
1782 *
1783 * Increments &drm_dp_mst_port.topology_kref without checking whether or
1784 * not it's already reached 0. This is only valid to use in scenarios where
1785 * you are already guaranteed to have at least one active topology reference
1786 * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
1787 *
1788 * See also:
1789 * drm_dp_mst_topology_try_get_port()
1790 * drm_dp_mst_topology_put_port()
1791 */
1792static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
1793{
1794 topology_ref_history_lock(port->mgr);
1795
1796 WARN_ON(kref_read(&port->topology_kref) == 0);
1797 kref_get(&port->topology_kref);
1798 DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
1799 save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
1800
1801 topology_ref_history_unlock(port->mgr);
1802}
1803
1804/**
1805 * drm_dp_mst_topology_put_port() - release a topology reference to a port
1806 * @port: The &struct drm_dp_mst_port to release the topology reference from
1807 *
1808 * Releases a topology reference from @port by decrementing
1809 * &drm_dp_mst_port.topology_kref.
1810 *
1811 * See also:
1812 * drm_dp_mst_topology_try_get_port()
1813 * drm_dp_mst_topology_get_port()
1814 */
1815static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
1816{
1817 topology_ref_history_lock(port->mgr);
1818
1819 DRM_DEBUG("port %p (%d)\n",
1820 port, kref_read(&port->topology_kref) - 1);
1821 save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT);
1822
1823 topology_ref_history_unlock(port->mgr);
1824 kref_put(&port->topology_kref, drm_dp_destroy_port);
1825}
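
/*
 * A minimal sketch of the intended try_get/put pattern for code in this
 * file, assuming the caller already holds a malloc reference to the port
 * (do_something_with() is purely illustrative):
 *
 *	if (drm_dp_mst_topology_try_get_port(port)) {
 *		// port is guaranteed to stay in the topology here
 *		do_something_with(port);
 *		drm_dp_mst_topology_put_port(port);
 *	} else {
 *		// port has already left the topology; only the malloc
 *		// reference keeps the struct itself alive
 *	}
 */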
1826
1827static struct drm_dp_mst_branch *
1828drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
1829 struct drm_dp_mst_branch *to_find)
1830{
1831 struct drm_dp_mst_port *port;
1832 struct drm_dp_mst_branch *rmstb;
1833
1834 if (to_find == mstb)
1835 return mstb;
1836
1837 list_for_each_entry(port, &mstb->ports, next) {
1838 if (port->mstb) {
1839 rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
1840 port->mstb, to_find);
1841 if (rmstb)
1842 return rmstb;
1843 }
1844 }
1845 return NULL;
1846}
1847
1848static struct drm_dp_mst_branch *
1849drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
1850 struct drm_dp_mst_branch *mstb)
1851{
1852 struct drm_dp_mst_branch *rmstb = NULL;
1853
1854 mutex_lock(&mgr->lock);
1855 if (mgr->mst_primary) {
1856 rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
1857 mgr->mst_primary, mstb);
1858
1859 if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
1860 rmstb = NULL;
1861 }
1862 mutex_unlock(&mgr->lock);
1863 return rmstb;
1864}
1865
1866static struct drm_dp_mst_port *
1867drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
1868 struct drm_dp_mst_port *to_find)
1869{
1870 struct drm_dp_mst_port *port, *mport;
1871
1872 list_for_each_entry(port, &mstb->ports, next) {
1873 if (port == to_find)
1874 return port;
1875
1876 if (port->mstb) {
1877 mport = drm_dp_mst_topology_get_port_validated_locked(
1878 port->mstb, to_find);
1879 if (mport)
1880 return mport;
1881 }
1882 }
1883 return NULL;
1884}
1885
1886static struct drm_dp_mst_port *
1887drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
1888 struct drm_dp_mst_port *port)
1889{
1890 struct drm_dp_mst_port *rport = NULL;
1891
1892 mutex_lock(&mgr->lock);
1893 if (mgr->mst_primary) {
1894 rport = drm_dp_mst_topology_get_port_validated_locked(
1895 mgr->mst_primary, port);
1896
1897 if (rport && !drm_dp_mst_topology_try_get_port(rport))
1898 rport = NULL;
1899 }
1900 mutex_unlock(&mgr->lock);
1901 return rport;
1902}
1903
1904static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
1905{
1906 struct drm_dp_mst_port *port;
1907 int ret;
1908
1909 list_for_each_entry(port, &mstb->ports, next) {
1910 if (port->port_num == port_num) {
1911 ret = drm_dp_mst_topology_try_get_port(port);
1912 return ret ? port : NULL;
1913 }
1914 }
1915
1916 return NULL;
1917}
1918
1919/*
1920 * calculate a new RAD for this MST branch device
1921 * if the parent has an LCT of 2 then it has 1 nibble of RAD;
1922 * if the parent has an LCT of 3 then it has 2 nibbles of RAD.
1923 */
1924static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
1925 u8 *rad)
1926{
1927 int parent_lct = port->parent->lct;
1928 int shift = 4;
1929 int idx = (parent_lct - 1) / 2;
1930 if (parent_lct > 1) {
1931 memcpy(rad, port->parent->rad, idx + 1);
1932 shift = (parent_lct % 2) ? 4 : 0;
1933 } else
1934 rad[0] = 0;
1935
1936 rad[idx] |= port->port_num << shift;
1937 return parent_lct + 1;
1938}
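
/*
 * Worked example of the nibble packing above: for a port with port_num 3
 * whose parent branch has LCT 2 and rad[0] == 0x10 (i.e. the parent hangs
 * off port 1 of the primary branch), idx is 0 and shift is 0, so the new
 * RAD becomes rad[0] == 0x13 and the returned LCT is 3.
 */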
1939
1940static bool drm_dp_mst_is_end_device(u8 pdt, bool mcs)
1941{
1942 switch (pdt) {
1943 case DP_PEER_DEVICE_DP_LEGACY_CONV:
1944 case DP_PEER_DEVICE_SST_SINK:
1945 return true;
1946 case DP_PEER_DEVICE_MST_BRANCHING:
1947		/* For SST branch devices */
1948 if (!mcs)
1949 return true;
1950
1951 return false;
1952 }
1953 return true;
1954}
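
/*
 * In terms of the link address reply fields, the mapping is:
 *
 *	DP_PEER_DEVICE_DP_LEGACY_CONV, any mcs	-> end device
 *	DP_PEER_DEVICE_SST_SINK,       any mcs	-> end device
 *	DP_PEER_DEVICE_MST_BRANCHING,  !mcs	-> end device (SST-only branch)
 *	DP_PEER_DEVICE_MST_BRANCHING,  mcs	-> not an end device
 *	any other pdt			-> end device
 */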
1955
1956static int
1957drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
1958 bool new_mcs)
1959{
1960 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
1961 struct drm_dp_mst_branch *mstb;
1962 u8 rad[8], lct;
1963 int ret = 0;
1964
1965 if (port->pdt == new_pdt && port->mcs == new_mcs)
1966 return 0;
1967
1968 /* Teardown the old pdt, if there is one */
1969 if (port->pdt != DP_PEER_DEVICE_NONE) {
1970 if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
1971 /*
1972 * If the new PDT would also have an i2c bus,
1973 * don't bother with reregistering it
1974 */
1975 if (new_pdt != DP_PEER_DEVICE_NONE &&
1976 drm_dp_mst_is_end_device(new_pdt, new_mcs)) {
1977 port->pdt = new_pdt;
1978 port->mcs = new_mcs;
1979 return 0;
1980 }
1981
1982 /* remove i2c over sideband */
1983 drm_dp_mst_unregister_i2c_bus(&port->aux);
1984 } else {
1985 mutex_lock(&mgr->lock);
1986 drm_dp_mst_topology_put_mstb(port->mstb);
1987 port->mstb = NULL;
1988 mutex_unlock(&mgr->lock);
1989 }
1990 }
1991
1992 port->pdt = new_pdt;
1993 port->mcs = new_mcs;
1994
1995 if (port->pdt != DP_PEER_DEVICE_NONE) {
1996 if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
1997 /* add i2c over sideband */
1998 ret = drm_dp_mst_register_i2c_bus(&port->aux);
1999 } else {
2000 lct = drm_dp_calculate_rad(port, rad);
2001 mstb = drm_dp_add_mst_branch_device(lct, rad);
2002 if (!mstb) {
2003 ret = -ENOMEM;
2004				DRM_ERROR("Failed to create MSTB for port %p\n",
2005 port);
2006 goto out;
2007 }
2008
2009 mutex_lock(&mgr->lock);
2010 port->mstb = mstb;
2011 mstb->mgr = port->mgr;
2012 mstb->port_parent = port;
2013
2014 /*
2015 * Make sure this port's memory allocation stays
2016 * around until its child MSTB releases it
2017 */
2018 drm_dp_mst_get_port_malloc(port);
2019 mutex_unlock(&mgr->lock);
2020
2021 /* And make sure we send a link address for this */
2022 ret = 1;
2023 }
2024 }
2025
2026out:
2027 if (ret < 0)
2028 port->pdt = DP_PEER_DEVICE_NONE;
2029 return ret;
2030}
2031
2032/**
2033 * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
2034 * @aux: Fake sideband AUX CH
2035 * @offset: address of the (first) register to read
2036 * @buffer: buffer to store the register values
2037 * @size: number of bytes in @buffer
2038 *
2039 * Performs the same functionality for remote devices via
2040 * sideband messaging as drm_dp_dpcd_read() does for local
2041 * devices via actual AUX CH.
2042 *
2043 * Return: Number of bytes read, or negative error code on failure.
2044 */
2045ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
2046 unsigned int offset, void *buffer, size_t size)
2047{
2048 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
2049 aux);
2050
2051 return drm_dp_send_dpcd_read(port->mgr, port,
2052 offset, size, buffer);
2053}
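
/*
 * Drivers normally don't call this directly: @port->aux is registered
 * with &drm_dp_aux.is_remote set, so the core DPCD helpers route remote
 * transfers here. A remote DPCD revision read might look like this
 * sketch (assuming the caller holds a reference to @port;
 * handle_revision() is a hypothetical consumer):
 *
 *	u8 rev;
 *
 *	if (drm_dp_dpcd_readb(&port->aux, DP_DPCD_REV, &rev) == 1)
 *		handle_revision(rev);
 */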
2054
2055/**
2056 * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband
2057 * @aux: Fake sideband AUX CH
2058 * @offset: address of the (first) register to write
2059 * @buffer: buffer containing the values to write
2060 * @size: number of bytes in @buffer
2061 *
2062 * Performs the same functionality for remote devices via
2063 * sideband messaging as drm_dp_dpcd_write() does for local
2064 * devices via actual AUX CH.
2065 *
2066 * Return: number of bytes written on success, negative error code on failure.
2067 */
2068ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
2069 unsigned int offset, void *buffer, size_t size)
2070{
2071 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
2072 aux);
2073
2074 return drm_dp_send_dpcd_write(port->mgr, port,
2075 offset, size, buffer);
2076}
2077
2078static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
2079{
2080 int ret = 0;
2081
2082 memcpy(mstb->guid, guid, 16);
2083
2084 if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
2085 if (mstb->port_parent) {
2086 ret = drm_dp_send_dpcd_write(mstb->mgr,
2087 mstb->port_parent,
2088 DP_GUID, 16, mstb->guid);
2089 } else {
2090 ret = drm_dp_dpcd_write(mstb->mgr->aux,
2091 DP_GUID, mstb->guid, 16);
2092 }
2093 }
2094
2095 if (ret < 16 && ret > 0)
2096 return -EPROTO;
2097
2098 return ret == 16 ? 0 : ret;
2099}
2100
2101static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
2102 int pnum,
2103 char *proppath,
2104 size_t proppath_size)
2105{
2106 int i;
2107 char temp[8];
2108 snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
2109 for (i = 0; i < (mstb->lct - 1); i++) {
2110 int shift = (i % 2) ? 0 : 4;
2111 int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
2112 snprintf(temp, sizeof(temp), "-%d", port_num);
2113 strlcat(proppath, temp, proppath_size);
2114 }
2115 snprintf(temp, sizeof(temp), "-%d", pnum);
2116 strlcat(proppath, temp, proppath_size);
2117}
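
/*
 * For example, with conn_base_id 30, a port numbered 8 on a branch
 * device at LCT 2 that sits behind port 1 of the primary branch
 * (rad[0] == 0x10) produces the path string "mst:30-1-8".
 */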
2118
2119/**
2120 * drm_dp_mst_connector_late_register() - Late MST connector registration
2121 * @connector: The MST connector
2122 * @port: The MST port for this connector
2123 *
2124 * Helper to register the remote aux device for this MST port. Drivers should
2125 * call this from their mst connector's late_register hook to enable MST aux
2126 * devices.
2127 *
2128 * Return: 0 on success, negative error code on failure.
2129 */
2130int drm_dp_mst_connector_late_register(struct drm_connector *connector,
2131 struct drm_dp_mst_port *port)
2132{
2133 DRM_DEBUG_KMS("registering %s remote bus for %s\n",
2134 port->aux.name, connector->kdev->kobj.name);
2135
2136 port->aux.dev = connector->kdev;
2137 return drm_dp_aux_register_devnode(&port->aux);
2138}
2139EXPORT_SYMBOL(drm_dp_mst_connector_late_register);
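
/*
 * A minimal sketch of the intended hookup; the connector wrapper and
 * the mst_port field are illustrative driver-side names:
 *
 *	static int my_mst_connector_late_register(struct drm_connector *connector)
 *	{
 *		struct my_connector *c = to_my_connector(connector);
 *
 *		return drm_dp_mst_connector_late_register(connector,
 *							  c->mst_port);
 *	}
 */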
2140
2141/**
2142 * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
2143 * @connector: The MST connector
2144 * @port: The MST port for this connector
2145 *
2146 * Helper to unregister the remote aux device for this MST port, registered by
2147 * drm_dp_mst_connector_late_register(). Drivers should call this from their mst
2148 * connector's early_unregister hook.
2149 */
2150void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
2151 struct drm_dp_mst_port *port)
2152{
2153 DRM_DEBUG_KMS("unregistering %s remote bus for %s\n",
2154 port->aux.name, connector->kdev->kobj.name);
2155 drm_dp_aux_unregister_devnode(&port->aux);
2156}
2157EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
2158
2159static void
2160drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
2161 struct drm_dp_mst_port *port)
2162{
2163 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
2164 char proppath[255];
2165 int ret;
2166
2167 build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
2168 port->connector = mgr->cbs->add_connector(mgr, port, proppath);
2169 if (!port->connector) {
2170 ret = -ENOMEM;
2171 goto error;
2172 }
2173
2174 if (port->pdt != DP_PEER_DEVICE_NONE &&
2175 drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
2176 port->cached_edid = drm_get_edid(port->connector,
2177 &port->aux.ddc);
2178 drm_connector_set_tile_property(port->connector);
2179 }
2180
2181 drm_connector_register(port->connector);
2182 return;
2183
2184error:
2185 DRM_ERROR("Failed to create connector for port %p: %d\n", port, ret);
2186}
2187
2188/*
2189 * Drop a topology reference, and unlink the port from the in-memory topology
2190 * layout
2191 */
2192static void
2193drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,
2194 struct drm_dp_mst_port *port)
2195{
2196 mutex_lock(&mgr->lock);
2197 port->parent->num_ports--;
2198 list_del(&port->next);
2199 mutex_unlock(&mgr->lock);
2200 drm_dp_mst_topology_put_port(port);
2201}
2202
2203static struct drm_dp_mst_port *
2204drm_dp_mst_add_port(struct drm_device *dev,
2205 struct drm_dp_mst_topology_mgr *mgr,
2206 struct drm_dp_mst_branch *mstb, u8 port_number)
2207{
2208 struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL);
2209
2210 if (!port)
2211 return NULL;
2212
2213 kref_init(&port->topology_kref);
2214 kref_init(&port->malloc_kref);
2215 port->parent = mstb;
2216 port->port_num = port_number;
2217 port->mgr = mgr;
2218 port->aux.name = "DPMST";
2219 port->aux.dev = dev->dev;
2220 port->aux.is_remote = true;
2221
2222 /* initialize the MST downstream port's AUX crc work queue */
2223 drm_dp_remote_aux_init(&port->aux);
2224
2225 /*
2226 * Make sure the memory allocation for our parent branch stays
2227 * around until our own memory allocation is released
2228 */
2229 drm_dp_mst_get_mstb_malloc(mstb);
2230
2231 return port;
2232}
2233
2234static int
2235drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
2236 struct drm_device *dev,
2237 struct drm_dp_link_addr_reply_port *port_msg)
2238{
2239 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
2240 struct drm_dp_mst_port *port;
2241 int old_ddps = 0, ret;
2242 u8 new_pdt = DP_PEER_DEVICE_NONE;
2243	bool new_mcs = false;
2244 bool created = false, send_link_addr = false, changed = false;
2245
2246 port = drm_dp_get_port(mstb, port_msg->port_number);
2247 if (!port) {
2248 port = drm_dp_mst_add_port(dev, mgr, mstb,
2249 port_msg->port_number);
2250 if (!port)
2251 return -ENOMEM;
2252 created = true;
2253 changed = true;
2254 } else if (!port->input && port_msg->input_port && port->connector) {
2255 /* Since port->connector can't be changed here, we create a
2256 * new port if input_port changes from 0 to 1
2257 */
2258 drm_dp_mst_topology_unlink_port(mgr, port);
2259 drm_dp_mst_topology_put_port(port);
2260 port = drm_dp_mst_add_port(dev, mgr, mstb,
2261 port_msg->port_number);
2262 if (!port)
2263 return -ENOMEM;
2264 changed = true;
2265 created = true;
2266 } else if (port->input && !port_msg->input_port) {
2267 changed = true;
2268 } else if (port->connector) {
2269 /* We're updating a port that's exposed to userspace, so do it
2270 * under lock
2271 */
2272 drm_modeset_lock(&mgr->base.lock, NULL);
2273
2274 old_ddps = port->ddps;
2275 changed = port->ddps != port_msg->ddps ||
2276 (port->ddps &&
2277 (port->ldps != port_msg->legacy_device_plug_status ||
2278 port->dpcd_rev != port_msg->dpcd_revision ||
2279 port->mcs != port_msg->mcs ||
2280 port->pdt != port_msg->peer_device_type ||
2281 port->num_sdp_stream_sinks !=
2282 port_msg->num_sdp_stream_sinks));
2283 }
2284
2285 port->input = port_msg->input_port;
2286 if (!port->input)
2287 new_pdt = port_msg->peer_device_type;
2288 new_mcs = port_msg->mcs;
2289 port->ddps = port_msg->ddps;
2290 port->ldps = port_msg->legacy_device_plug_status;
2291 port->dpcd_rev = port_msg->dpcd_revision;
2292 port->num_sdp_streams = port_msg->num_sdp_streams;
2293 port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
2294
2295	/* manage mstb port lists with mgr lock - take a reference
2296	 * for this list */
2297 if (created) {
2298 mutex_lock(&mgr->lock);
2299 drm_dp_mst_topology_get_port(port);
2300 list_add(&port->next, &mstb->ports);
2301 mstb->num_ports++;
2302 mutex_unlock(&mgr->lock);
2303 }
2304
2305 /*
2306 * Reprobe PBN caps on both hotplug, and when re-probing the link
2307 * for our parent mstb
2308 */
2309 if (old_ddps != port->ddps || !created) {
2310 if (port->ddps && !port->input) {
2311 ret = drm_dp_send_enum_path_resources(mgr, mstb,
2312 port);
2313 if (ret == 1)
2314 changed = true;
2315 } else {
2316 port->full_pbn = 0;
2317 }
2318 }
2319
2320 ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
2321 if (ret == 1) {
2322 send_link_addr = true;
2323 } else if (ret < 0) {
2324 DRM_ERROR("Failed to change PDT on port %p: %d\n",
2325 port, ret);
2326 goto fail;
2327 }
2328
2329 /*
2330 * If this port wasn't just created, then we're reprobing because
2331 * we're coming out of suspend. In this case, always resend the link
2332 * address if there's an MSTB on this port
2333 */
2334 if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
2335 port->mcs)
2336 send_link_addr = true;
2337
2338 if (port->connector)
2339 drm_modeset_unlock(&mgr->base.lock);
2340 else if (!port->input)
2341 drm_dp_mst_port_add_connector(mstb, port);
2342
2343 if (send_link_addr && port->mstb) {
2344 ret = drm_dp_send_link_address(mgr, port->mstb);
2345 if (ret == 1) /* MSTB below us changed */
2346 changed = true;
2347 else if (ret < 0)
2348 goto fail_put;
2349 }
2350
2351 /* put reference to this port */
2352 drm_dp_mst_topology_put_port(port);
2353 return changed;
2354
2355fail:
2356 drm_dp_mst_topology_unlink_port(mgr, port);
2357 if (port->connector)
2358 drm_modeset_unlock(&mgr->base.lock);
2359fail_put:
2360 drm_dp_mst_topology_put_port(port);
2361 return ret;
2362}
2363
2364static void
2365drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
2366 struct drm_dp_connection_status_notify *conn_stat)
2367{
2368 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
2369 struct drm_dp_mst_port *port;
2370 int old_ddps, old_input, ret, i;
2371 u8 new_pdt;
2372 bool new_mcs;
2373 bool dowork = false, create_connector = false;
2374
2375 port = drm_dp_get_port(mstb, conn_stat->port_number);
2376 if (!port)
2377 return;
2378
2379 if (port->connector) {
2380 if (!port->input && conn_stat->input_port) {
2381 /*
2382 * We can't remove a connector from an already exposed
2383 * port, so just throw the port out and make sure we
2384			 * reprobe the link address of its parent MSTB
2385 */
2386 drm_dp_mst_topology_unlink_port(mgr, port);
2387 mstb->link_address_sent = false;
2388 dowork = true;
2389 goto out;
2390 }
2391
2392 /* Locking is only needed if the port's exposed to userspace */
2393 drm_modeset_lock(&mgr->base.lock, NULL);
2394 } else if (port->input && !conn_stat->input_port) {
2395 create_connector = true;
2396 /* Reprobe link address so we get num_sdp_streams */
2397 mstb->link_address_sent = false;
2398 dowork = true;
2399 }
2400
2401 old_ddps = port->ddps;
2402 old_input = port->input;
2403 port->input = conn_stat->input_port;
2404 port->ldps = conn_stat->legacy_device_plug_status;
2405 port->ddps = conn_stat->displayport_device_plug_status;
2406
2407 if (old_ddps != port->ddps) {
2408 if (port->ddps && !port->input)
2409 drm_dp_send_enum_path_resources(mgr, mstb, port);
2410 else
2411 port->full_pbn = 0;
2412 }
2413
2414 new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
2415 new_mcs = conn_stat->message_capability_status;
2416 ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
2417 if (ret == 1) {
2418 dowork = true;
2419 } else if (ret < 0) {
2420 DRM_ERROR("Failed to change PDT for port %p: %d\n",
2421 port, ret);
2422 dowork = false;
2423 }
2424
2425 if (!old_input && old_ddps != port->ddps && !port->ddps) {
2426 for (i = 0; i < mgr->max_payloads; i++) {
2427 struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
2428 struct drm_dp_mst_port *port_validated;
2429
2430 if (!vcpi)
2431 continue;
2432
2433 port_validated =
2434 container_of(vcpi, struct drm_dp_mst_port, vcpi);
2435 port_validated =
2436 drm_dp_mst_topology_get_port_validated(mgr, port_validated);
2437 if (!port_validated) {
2438 mutex_lock(&mgr->payload_lock);
2439 vcpi->num_slots = 0;
2440 mutex_unlock(&mgr->payload_lock);
2441 } else {
2442 drm_dp_mst_topology_put_port(port_validated);
2443 }
2444 }
2445 }
2446
2447 if (port->connector)
2448 drm_modeset_unlock(&mgr->base.lock);
2449 else if (create_connector)
2450 drm_dp_mst_port_add_connector(mstb, port);
2451
2452out:
2453 drm_dp_mst_topology_put_port(port);
2454 if (dowork)
2455 queue_work(system_long_wq, &mstb->mgr->work);
2456}
2457
2458static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
2459 u8 lct, u8 *rad)
2460{
2461 struct drm_dp_mst_branch *mstb;
2462 struct drm_dp_mst_port *port;
2463 int i, ret;
2464 /* find the port by iterating down */
2465
2466 mutex_lock(&mgr->lock);
2467 mstb = mgr->mst_primary;
2468
2469 if (!mstb)
2470 goto out;
2471
2472 for (i = 0; i < lct - 1; i++) {
2473 int shift = (i % 2) ? 0 : 4;
2474 int port_num = (rad[i / 2] >> shift) & 0xf;
2475
2476 list_for_each_entry(port, &mstb->ports, next) {
2477 if (port->port_num == port_num) {
2478 mstb = port->mstb;
2479 if (!mstb) {
2480 DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
2481 goto out;
2482 }
2483
2484 break;
2485 }
2486 }
2487 }
2488 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2489 if (!ret)
2490 mstb = NULL;
2491out:
2492 mutex_unlock(&mgr->lock);
2493 return mstb;
2494}
2495
2496static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
2497 struct drm_dp_mst_branch *mstb,
2498 const uint8_t *guid)
2499{
2500 struct drm_dp_mst_branch *found_mstb;
2501 struct drm_dp_mst_port *port;
2502
2503 if (memcmp(mstb->guid, guid, 16) == 0)
2504 return mstb;
2505
2507 list_for_each_entry(port, &mstb->ports, next) {
2508 if (!port->mstb)
2509 continue;
2510
2511 found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
2512
2513 if (found_mstb)
2514 return found_mstb;
2515 }
2516
2517 return NULL;
2518}
2519
2520static struct drm_dp_mst_branch *
2521drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
2522 const uint8_t *guid)
2523{
2524 struct drm_dp_mst_branch *mstb;
2525 int ret;
2526
2527 /* find the port by iterating down */
2528 mutex_lock(&mgr->lock);
2529
2530 mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
2531 if (mstb) {
2532 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2533 if (!ret)
2534 mstb = NULL;
2535 }
2536
2537 mutex_unlock(&mgr->lock);
2538 return mstb;
2539}
2540
2541static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2542 struct drm_dp_mst_branch *mstb)
2543{
2544 struct drm_dp_mst_port *port;
2545 int ret;
2546 bool changed = false;
2547
2548 if (!mstb->link_address_sent) {
2549 ret = drm_dp_send_link_address(mgr, mstb);
2550 if (ret == 1)
2551 changed = true;
2552 else if (ret < 0)
2553 return ret;
2554 }
2555
2556 list_for_each_entry(port, &mstb->ports, next) {
2557 struct drm_dp_mst_branch *mstb_child = NULL;
2558
2559 if (port->input || !port->ddps)
2560 continue;
2561
2562 if (port->mstb)
2563 mstb_child = drm_dp_mst_topology_get_mstb_validated(
2564 mgr, port->mstb);
2565
2566 if (mstb_child) {
2567 ret = drm_dp_check_and_send_link_address(mgr,
2568 mstb_child);
2569 drm_dp_mst_topology_put_mstb(mstb_child);
2570 if (ret == 1)
2571 changed = true;
2572 else if (ret < 0)
2573 return ret;
2574 }
2575 }
2576
2577 return changed;
2578}
2579
2580static void drm_dp_mst_link_probe_work(struct work_struct *work)
2581{
2582 struct drm_dp_mst_topology_mgr *mgr =
2583 container_of(work, struct drm_dp_mst_topology_mgr, work);
2584 struct drm_device *dev = mgr->dev;
2585 struct drm_dp_mst_branch *mstb;
2586 int ret;
2587 bool clear_payload_id_table;
2588
2589 mutex_lock(&mgr->probe_lock);
2590
2591 mutex_lock(&mgr->lock);
2592 clear_payload_id_table = !mgr->payload_id_table_cleared;
2593 mgr->payload_id_table_cleared = true;
2594
2595 mstb = mgr->mst_primary;
2596 if (mstb) {
2597 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2598 if (!ret)
2599 mstb = NULL;
2600 }
2601 mutex_unlock(&mgr->lock);
2602 if (!mstb) {
2603 mutex_unlock(&mgr->probe_lock);
2604 return;
2605 }
2606
2607 /*
2608 * Certain branch devices seem to incorrectly report an available_pbn
2609 * of 0 on downstream sinks, even after clearing the
2610 * DP_PAYLOAD_ALLOCATE_* registers in
2611 * drm_dp_mst_topology_mgr_set_mst(). Namely, the CableMatters USB-C
2612 * 2x DP hub. Sending a CLEAR_PAYLOAD_ID_TABLE message seems to make
2613 * things work again.
2614 */
2615 if (clear_payload_id_table) {
2616 DRM_DEBUG_KMS("Clearing payload ID table\n");
2617 drm_dp_send_clear_payload_id_table(mgr, mstb);
2618 }
2619
2620 ret = drm_dp_check_and_send_link_address(mgr, mstb);
2621 drm_dp_mst_topology_put_mstb(mstb);
2622
2623 mutex_unlock(&mgr->probe_lock);
2624 if (ret)
2625 drm_kms_helper_hotplug_event(dev);
2626}
2627
2628static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
2629 u8 *guid)
2630{
2631 u64 salt;
2632
2633 if (memchr_inv(guid, 0, 16))
2634 return true;
2635
2636 salt = get_jiffies_64();
2637
2638 memcpy(&guid[0], &salt, sizeof(u64));
2639 memcpy(&guid[8], &salt, sizeof(u64));
2640
2641 return false;
2642}
2643
2644static void build_dpcd_read(struct drm_dp_sideband_msg_tx *msg,
2645 u8 port_num, u32 offset, u8 num_bytes)
2646{
2647 struct drm_dp_sideband_msg_req_body req;
2648
2649 req.req_type = DP_REMOTE_DPCD_READ;
2650 req.u.dpcd_read.port_number = port_num;
2651 req.u.dpcd_read.dpcd_address = offset;
2652 req.u.dpcd_read.num_bytes = num_bytes;
2653 drm_dp_encode_sideband_req(&req, msg);
2654}
2655
2656static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
2657 bool up, u8 *msg, int len)
2658{
2659 int ret;
2660 int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
2661 int tosend, total, offset;
2662 int retries = 0;
2663
2664retry:
2665 total = len;
2666 offset = 0;
2667 do {
2668 tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
2669
2670 ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
2671 &msg[offset],
2672 tosend);
2673 if (ret != tosend) {
2674 if (ret == -EIO && retries < 5) {
2675 retries++;
2676 goto retry;
2677 }
2678 DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
2679
2680 return -EIO;
2681 }
2682 offset += tosend;
2683 total -= tosend;
2684 } while (total > 0);
2685 return 0;
2686}
2687
2688static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
2689 struct drm_dp_sideband_msg_tx *txmsg)
2690{
2691 struct drm_dp_mst_branch *mstb = txmsg->dst;
2692 u8 req_type;
2693
2694	if (txmsg->seqno == -1) {
2695		/* both msg slots are full */
2696		if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
2697 DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
2698 return -EAGAIN;
2699 }
2700 if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
2701 txmsg->seqno = mstb->last_seqno;
2702 mstb->last_seqno ^= 1;
2703 } else if (mstb->tx_slots[0] == NULL)
2704 txmsg->seqno = 0;
2705 else
2706 txmsg->seqno = 1;
2707 mstb->tx_slots[txmsg->seqno] = txmsg;
2708 }
2709
2710 req_type = txmsg->msg[0] & 0x7f;
2711 if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
2712 req_type == DP_RESOURCE_STATUS_NOTIFY)
2713 hdr->broadcast = 1;
2714 else
2715 hdr->broadcast = 0;
2716 hdr->path_msg = txmsg->path_msg;
2717 hdr->lct = mstb->lct;
2718 hdr->lcr = mstb->lct - 1;
2719 if (mstb->lct > 1)
2720 memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
2721 hdr->seqno = txmsg->seqno;
2722 return 0;
2723}
2724/*
2725 * process a single block of the next message in the sideband queue
2726 */
2727static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
2728 struct drm_dp_sideband_msg_tx *txmsg,
2729 bool up)
2730{
2731 u8 chunk[48];
2732 struct drm_dp_sideband_msg_hdr hdr;
2733 int len, space, idx, tosend;
2734 int ret;
2735
2736 memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
2737
2738 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
2739 txmsg->seqno = -1;
2740 txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
2741 }
2742
2743	/* make hdr from dst mst - for replies use seqno,
2744	 * otherwise assign one */
2745 ret = set_hdr_from_dst_qlock(&hdr, txmsg);
2746 if (ret < 0)
2747 return ret;
2748
2749 /* amount left to send in this message */
2750 len = txmsg->cur_len - txmsg->cur_offset;
2751
2752 /* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
2753 space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
2754
2755 tosend = min(len, space);
2756 if (len == txmsg->cur_len)
2757 hdr.somt = 1;
2758 if (space >= len)
2759 hdr.eomt = 1;
2760
2762 hdr.msg_len = tosend + 1;
2763 drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
2764 memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
2765 /* add crc at end */
2766 drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
2767 idx += tosend + 1;
2768
2769 ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
2770	if (unlikely(ret)) {
2771		if (drm_debug_enabled(DRM_UT_DP)) {
2772			struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2773
2774			drm_printf(&p, "sideband msg failed to send\n");
2775			drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2776		}
2777		return ret;
2778	}
2777
2778 txmsg->cur_offset += tosend;
2779 if (txmsg->cur_offset == txmsg->cur_len) {
2780 txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
2781 return 1;
2782 }
2783 return 0;
2784}
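
/*
 * Example of the chunking above, assuming an LCT-1 destination:
 * drm_dp_calc_sb_hdr_size() is 3, so space is 48 - 1 - 3 = 44 bytes per
 * chunk. A 60-byte message therefore goes out as one chunk with somt=1
 * carrying 44 bytes, followed by a second chunk with eomt=1 carrying
 * the remaining 16.
 */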
2785
2786static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
2787{
2788 struct drm_dp_sideband_msg_tx *txmsg;
2789 int ret;
2790
2791 WARN_ON(!mutex_is_locked(&mgr->qlock));
2792
2793 /* construct a chunk from the first msg in the tx_msg queue */
2794 if (list_empty(&mgr->tx_msg_downq))
2795 return;
2796
2797 txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
2798 ret = process_single_tx_qlock(mgr, txmsg, false);
2799 if (ret == 1) {
2800 /* txmsg is sent it should be in the slots now */
2801 mgr->is_waiting_for_dwn_reply = true;
2802 list_del(&txmsg->next);
2803 } else if (ret) {
2804 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
2805 mgr->is_waiting_for_dwn_reply = false;
2806 list_del(&txmsg->next);
2807 if (txmsg->seqno != -1)
2808 txmsg->dst->tx_slots[txmsg->seqno] = NULL;
2809 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
2810 wake_up_all(&mgr->tx_waitq);
2811 }
2812}
2813
2814/* called holding qlock */
2815static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
2816 struct drm_dp_sideband_msg_tx *txmsg)
2817{
2818 int ret;
2819
2820 /* construct a chunk from the first msg in the tx_msg queue */
2821 ret = process_single_tx_qlock(mgr, txmsg, true);
2822
2823 if (ret != 1)
2824 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
2825
2826 if (txmsg->seqno != -1) {
2827		WARN_ON((unsigned int)txmsg->seqno >=
2828			ARRAY_SIZE(txmsg->dst->tx_slots));
2829 txmsg->dst->tx_slots[txmsg->seqno] = NULL;
2830 }
2831}
2832
2833static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
2834 struct drm_dp_sideband_msg_tx *txmsg)
2835{
2836 mutex_lock(&mgr->qlock);
2837 list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
2838
2839 if (drm_debug_enabled(DRM_UT_DP)) {
2840 struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2841
2842 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2843 }
2844
2845 if (list_is_singular(&mgr->tx_msg_downq) &&
2846 !mgr->is_waiting_for_dwn_reply)
2847 process_single_down_tx_qlock(mgr);
2848 mutex_unlock(&mgr->qlock);
2849}
2850
2851static void
2852drm_dp_dump_link_address(struct drm_dp_link_address_ack_reply *reply)
2853{
2854 struct drm_dp_link_addr_reply_port *port_reply;
2855 int i;
2856
2857 for (i = 0; i < reply->nports; i++) {
2858 port_reply = &reply->ports[i];
2859 DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n",
2860 i,
2861 port_reply->input_port,
2862 port_reply->peer_device_type,
2863 port_reply->port_number,
2864 port_reply->dpcd_revision,
2865 port_reply->mcs,
2866 port_reply->ddps,
2867 port_reply->legacy_device_plug_status,
2868 port_reply->num_sdp_streams,
2869 port_reply->num_sdp_stream_sinks);
2870 }
2871}
2872
2873static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2874 struct drm_dp_mst_branch *mstb)
2875{
2876 struct drm_dp_sideband_msg_tx *txmsg;
2877 struct drm_dp_link_address_ack_reply *reply;
2878 struct drm_dp_mst_port *port, *tmp;
2879 int i, ret, port_mask = 0;
2880 bool changed = false;
2881
2882 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2883 if (!txmsg)
2884 return -ENOMEM;
2885
2886 txmsg->dst = mstb;
2887 build_link_address(txmsg);
2888
2889 mstb->link_address_sent = true;
2890 drm_dp_queue_down_tx(mgr, txmsg);
2891
2892 /* FIXME: Actually do some real error handling here */
2893 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2894 if (ret <= 0) {
2895 DRM_ERROR("Sending link address failed with %d\n", ret);
2896 goto out;
2897 }
2898 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2899 DRM_ERROR("link address NAK received\n");
2900 ret = -EIO;
2901 goto out;
2902 }
2903
2904 reply = &txmsg->reply.u.link_addr;
2905 DRM_DEBUG_KMS("link address reply: %d\n", reply->nports);
2906 drm_dp_dump_link_address(reply);
2907
2908 ret = drm_dp_check_mstb_guid(mstb, reply->guid);
2909 if (ret) {
2910 char buf[64];
2911
2912 drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, buf, sizeof(buf));
2913 DRM_ERROR("GUID check on %s failed: %d\n",
2914 buf, ret);
2915 goto out;
2916 }
2917
2918 for (i = 0; i < reply->nports; i++) {
2919 port_mask |= BIT(reply->ports[i].port_number);
2920 ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev,
2921 &reply->ports[i]);
2922 if (ret == 1)
2923 changed = true;
2924 else if (ret < 0)
2925 goto out;
2926 }
2927
2928 /* Prune any ports that are currently a part of mstb in our in-memory
2929 * topology, but were not seen in this link address. Usually this
2930 * means that they were removed while the topology was out of sync,
2931 * e.g. during suspend/resume
2932 */
2933 mutex_lock(&mgr->lock);
2934 list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
2935 if (port_mask & BIT(port->port_num))
2936 continue;
2937
2938 DRM_DEBUG_KMS("port %d was not in link address, removing\n",
2939 port->port_num);
2940 list_del(&port->next);
2941 drm_dp_mst_topology_put_port(port);
2942 changed = true;
2943 }
2944 mutex_unlock(&mgr->lock);
2945
2946out:
2947 if (ret <= 0)
2948 mstb->link_address_sent = false;
2949 kfree(txmsg);
2950 return ret < 0 ? ret : changed;
2951}
2952
2953static void
2954drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_branch *mstb)
2955{
2956 struct drm_dp_sideband_msg_tx *txmsg;
2957 int ret;
2958
2959 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2960 if (!txmsg)
2961 return;
2962
2963 txmsg->dst = mstb;
2964 build_clear_payload_id_table(txmsg);
2965
2966 drm_dp_queue_down_tx(mgr, txmsg);
2967
2968 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2969 if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
2970 DRM_DEBUG_KMS("clear payload table id nak received\n");
2971
2972 kfree(txmsg);
2973}
2974
2975static int
2976drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
2977 struct drm_dp_mst_branch *mstb,
2978 struct drm_dp_mst_port *port)
2979{
2980 struct drm_dp_enum_path_resources_ack_reply *path_res;
2981 struct drm_dp_sideband_msg_tx *txmsg;
2982 int ret;
2983
2984 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2985 if (!txmsg)
2986 return -ENOMEM;
2987
2988 txmsg->dst = mstb;
2989 build_enum_path_resources(txmsg, port->port_num);
2990
2991 drm_dp_queue_down_tx(mgr, txmsg);
2992
2993 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2994 if (ret > 0) {
2995 ret = 0;
2996 path_res = &txmsg->reply.u.path_resources;
2997
2998 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2999 DRM_DEBUG_KMS("enum path resources nak received\n");
3000 } else {
3001 if (port->port_num != path_res->port_number)
3002 DRM_ERROR("got incorrect port in response\n");
3003
3004 DRM_DEBUG_KMS("enum path resources %d: %d %d\n",
3005 path_res->port_number,
3006 path_res->full_payload_bw_number,
3007 path_res->avail_payload_bw_number);
3008
3009 /*
3010 * If something changed, make sure we send a
3011 * hotplug
3012 */
3013 if (port->full_pbn != path_res->full_payload_bw_number ||
3014 port->fec_capable != path_res->fec_capable)
3015 ret = 1;
3016
3017 port->full_pbn = path_res->full_payload_bw_number;
3018 port->fec_capable = path_res->fec_capable;
3019 }
3020 }
3021
3022 kfree(txmsg);
3023 return ret;
3024}
3025
3026static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
3027{
3028 if (!mstb->port_parent)
3029 return NULL;
3030
3031 if (mstb->port_parent->mstb != mstb)
3032 return mstb->port_parent;
3033
3034 return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
3035}
3036
3037/*
3038 * Searches upwards in the topology starting from mstb to try to find the
3039 * closest available parent of mstb that's still connected to the rest of the
3040 * topology. This can be used to perform operations like releasing
3041 * payloads, where the branch device which owned the payload may no longer
3042 * be around, and the payload must therefore be freed on its last living
3043 * relative instead.
3044 */
3045static struct drm_dp_mst_branch *
3046drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
3047 struct drm_dp_mst_branch *mstb,
3048 int *port_num)
3049{
3050 struct drm_dp_mst_branch *rmstb = NULL;
3051 struct drm_dp_mst_port *found_port;
3052
3053 mutex_lock(&mgr->lock);
3054 if (!mgr->mst_primary)
3055 goto out;
3056
3057 do {
3058 found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
3059 if (!found_port)
3060 break;
3061
3062 if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
3063 rmstb = found_port->parent;
3064 *port_num = found_port->port_num;
3065 } else {
3066 /* Search again, starting from this parent */
3067 mstb = found_port->parent;
3068 }
3069 } while (!rmstb);
3070out:
3071 mutex_unlock(&mgr->lock);
3072 return rmstb;
3073}
3074
3075static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
3076 struct drm_dp_mst_port *port,
3077 int id,
3078 int pbn)
3079{
3080 struct drm_dp_sideband_msg_tx *txmsg;
3081 struct drm_dp_mst_branch *mstb;
3082 int ret, port_num;
3083 u8 sinks[DRM_DP_MAX_SDP_STREAMS];
3084 int i;
3085
3086 port_num = port->port_num;
3087 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3088 if (!mstb) {
3089 mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
3090 port->parent,
3091 &port_num);
3092
3093 if (!mstb)
3094 return -EINVAL;
3095 }
3096
3097 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3098 if (!txmsg) {
3099 ret = -ENOMEM;
3100 goto fail_put;
3101 }
3102
3103 for (i = 0; i < port->num_sdp_streams; i++)
3104 sinks[i] = i;
3105
3106 txmsg->dst = mstb;
3107 build_allocate_payload(txmsg, port_num,
3108 id,
3109 pbn, port->num_sdp_streams, sinks);
3110
3111 drm_dp_queue_down_tx(mgr, txmsg);
3112
3113 /*
3114 * FIXME: there is a small chance that between getting the last
3115 * connected mstb and sending the payload message, the last connected
3116 * mstb could also be removed from the topology. In the future, this
3117 * needs to be fixed by restarting the
3118 * drm_dp_get_last_connected_port_and_mstb() search in the event of a
3119 * timeout if the topology is still connected to the system.
3120 */
3121 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3122 if (ret > 0) {
3123 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3124 ret = -EINVAL;
3125 else
3126 ret = 0;
3127 }
3128 kfree(txmsg);
3129fail_put:
3130 drm_dp_mst_topology_put_mstb(mstb);
3131 return ret;
3132}
3133
3134int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
3135 struct drm_dp_mst_port *port, bool power_up)
3136{
3137 struct drm_dp_sideband_msg_tx *txmsg;
3138 int ret;
3139
3140 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3141 if (!port)
3142 return -EINVAL;
3143
3144 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3145 if (!txmsg) {
3146 drm_dp_mst_topology_put_port(port);
3147 return -ENOMEM;
3148 }
3149
3150 txmsg->dst = port->parent;
3151 build_power_updown_phy(txmsg, port->port_num, power_up);
3152 drm_dp_queue_down_tx(mgr, txmsg);
3153
3154 ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
3155 if (ret > 0) {
3156 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3157 ret = -EINVAL;
3158 else
3159 ret = 0;
3160 }
3161 kfree(txmsg);
3162 drm_dp_mst_topology_put_port(port);
3163
3164 return ret;
3165}
3166EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
3167
3168static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
3169 int id,
3170 struct drm_dp_payload *payload)
3171{
3172 int ret;
3173
3174 ret = drm_dp_dpcd_write_payload(mgr, id, payload);
3175 if (ret < 0) {
3176 payload->payload_state = 0;
3177 return ret;
3178 }
3179 payload->payload_state = DP_PAYLOAD_LOCAL;
3180 return 0;
3181}
3182
3183static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
3184 struct drm_dp_mst_port *port,
3185 int id,
3186 struct drm_dp_payload *payload)
3187{
3188 int ret;
3189 ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
3190 if (ret < 0)
3191 return ret;
3192 payload->payload_state = DP_PAYLOAD_REMOTE;
3193 return ret;
3194}
3195
3196static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
3197 struct drm_dp_mst_port *port,
3198 int id,
3199 struct drm_dp_payload *payload)
3200{
3201 DRM_DEBUG_KMS("\n");
3202 /* it's okay for these to fail */
3203	if (port)
3204		drm_dp_payload_send_msg(mgr, port, id, 0);
3206
3207 drm_dp_dpcd_write_payload(mgr, id, payload);
3208 payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
3209 return 0;
3210}
3211
3212static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
3213 int id,
3214 struct drm_dp_payload *payload)
3215{
3216 payload->payload_state = 0;
3217 return 0;
3218}
3219
3220/**
3221 * drm_dp_update_payload_part1() - Execute payload update part 1
3222 * @mgr: manager to use.
3223 *
3224 * This iterates over all proposed virtual channels, and tries to
3225 * allocate space in the link for them. For 0->slots transitions,
3226 * this step just writes the VCPI to the MST device. For slots->0
3227 * transitions, this writes the updated VCPIs and removes the
3228 * remote VC payloads.
3229 *
3230 * After calling this, the driver should generate ACT and payload
3231 * packets.
3232 */
3233int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
3234{
3235 struct drm_dp_payload req_payload;
3236 struct drm_dp_mst_port *port;
3237 int i, j;
3238 int cur_slots = 1;
3239
3240 mutex_lock(&mgr->payload_lock);
3241 for (i = 0; i < mgr->max_payloads; i++) {
3242 struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
3243 struct drm_dp_payload *payload = &mgr->payloads[i];
3244 bool put_port = false;
3245
3246 /* solve the current payloads - compare to the hw ones
3247 - update the hw view */
3248 req_payload.start_slot = cur_slots;
3249 if (vcpi) {
3250 port = container_of(vcpi, struct drm_dp_mst_port,
3251 vcpi);
3252
3253 /* Validated ports don't matter if we're releasing
3254 * VCPI
3255 */
3256 if (vcpi->num_slots) {
3257 port = drm_dp_mst_topology_get_port_validated(
3258 mgr, port);
3259 if (!port) {
3260 mutex_unlock(&mgr->payload_lock);
3261 return -EINVAL;
3262 }
3263 put_port = true;
3264 }
3265
3266 req_payload.num_slots = vcpi->num_slots;
3267 req_payload.vcpi = vcpi->vcpi;
3268 } else {
3269 port = NULL;
3270 req_payload.num_slots = 0;
3271 }
3272
3273 payload->start_slot = req_payload.start_slot;
3274 /* work out what is required to happen with this payload */
3275 if (payload->num_slots != req_payload.num_slots) {
3277 /* need to push an update for this payload */
3278 if (req_payload.num_slots) {
3279 drm_dp_create_payload_step1(mgr, vcpi->vcpi,
3280 &req_payload);
3281 payload->num_slots = req_payload.num_slots;
3282 payload->vcpi = req_payload.vcpi;
3283
3284 } else if (payload->num_slots) {
3285 payload->num_slots = 0;
3286 drm_dp_destroy_payload_step1(mgr, port,
3287 payload->vcpi,
3288 payload);
3289 req_payload.payload_state =
3290 payload->payload_state;
3291 payload->start_slot = 0;
3292 }
3293 payload->payload_state = req_payload.payload_state;
3294 }
3295 cur_slots += req_payload.num_slots;
3296
3297 if (put_port)
3298 drm_dp_mst_topology_put_port(port);
3299 }
3300
3301 for (i = 0; i < mgr->max_payloads; /* do nothing */) {
3302 if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) {
3303 i++;
3304 continue;
3305 }
3306
3307 DRM_DEBUG_KMS("removing payload %d\n", i);
3308 for (j = i; j < mgr->max_payloads - 1; j++) {
3309 mgr->payloads[j] = mgr->payloads[j + 1];
3310 mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
3311
3312 if (mgr->proposed_vcpis[j] &&
3313 mgr->proposed_vcpis[j]->num_slots) {
3314 set_bit(j + 1, &mgr->payload_mask);
3315 } else {
3316 clear_bit(j + 1, &mgr->payload_mask);
3317 }
3318 }
3319
3320 memset(&mgr->payloads[mgr->max_payloads - 1], 0,
3321 sizeof(struct drm_dp_payload));
3322 mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
3323 clear_bit(mgr->max_payloads, &mgr->payload_mask);
3324 }
3325 mutex_unlock(&mgr->payload_lock);
3326
3327 return 0;
3328}
3329EXPORT_SYMBOL(drm_dp_update_payload_part1);
3330
3331/**
3332 * drm_dp_update_payload_part2() - Execute payload update part 2
3333 * @mgr: manager to use.
3334 *
3335 * This iterates over all proposed virtual channels, and tries to
3336 * allocate space in the link for them. For 0->slots transitions,
3337 * this step writes the remote VC payload commands. For slots->0
3338 * this just resets some internal state.
3339 */
3340int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
3341{
3342 struct drm_dp_mst_port *port;
3343 int i;
3344 int ret = 0;
3345
3346	mutex_lock(&mgr->payload_lock);
3347	for (i = 0; i < mgr->max_payloads; i++) {
3348 if (!mgr->proposed_vcpis[i])
3349 continue;
3350
3351 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
3352
3353 DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
3354 if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
3355 ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
3356 } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
3357 ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
3358 }
3359 if (ret) {
3360 mutex_unlock(&mgr->payload_lock);
3361 return ret;
3362 }
3363 }
3364 mutex_unlock(&mgr->payload_lock);
3365 return 0;
3366}
3367EXPORT_SYMBOL(drm_dp_update_payload_part2);
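
/*
 * Together with drm_dp_update_payload_part1(), the sequence a driver is
 * expected to follow around enabling payloads looks roughly like this
 * (the hardware-side ACT trigger is driver-specific and only sketched):
 *
 *	drm_dp_update_payload_part1(mgr);
 *	// program the hardware payload table and trigger ACT
 *	drm_dp_check_act_status(mgr);
 *	drm_dp_update_payload_part2(mgr);
 */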
3368
3369static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
3370 struct drm_dp_mst_port *port,
3371 int offset, int size, u8 *bytes)
3372{
3373 int ret = 0;
3374 struct drm_dp_sideband_msg_tx *txmsg;
3375 struct drm_dp_mst_branch *mstb;
3376
3377 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3378 if (!mstb)
3379 return -EINVAL;
3380
3381 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3382 if (!txmsg) {
3383 ret = -ENOMEM;
3384 goto fail_put;
3385 }
3386
3387 build_dpcd_read(txmsg, port->port_num, offset, size);
3388 txmsg->dst = port->parent;
3389
3390 drm_dp_queue_down_tx(mgr, txmsg);
3391
3392 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3393 if (ret < 0)
3394 goto fail_free;
3395
3396 /* DPCD read should never be NACKed */
3397	if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3398 DRM_ERROR("mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",
3399 mstb, port->port_num, offset, size);
3400 ret = -EIO;
3401 goto fail_free;
3402 }
3403
3404 if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) {
3405 ret = -EPROTO;
3406 goto fail_free;
3407 }
3408
3409	ret = size;
3410	memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret);
3412
3413fail_free:
3414 kfree(txmsg);
3415fail_put:
3416 drm_dp_mst_topology_put_mstb(mstb);
3417
3418 return ret;
3419}
3420
3421static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
3422 struct drm_dp_mst_port *port,
3423 int offset, int size, u8 *bytes)
3424{
3425 int ret;
3426 struct drm_dp_sideband_msg_tx *txmsg;
3427 struct drm_dp_mst_branch *mstb;
3428
3429 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3430 if (!mstb)
3431 return -EINVAL;
3432
3433 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3434 if (!txmsg) {
3435 ret = -ENOMEM;
3436 goto fail_put;
3437 }
3438
3439 build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
3440 txmsg->dst = mstb;
3441
3442 drm_dp_queue_down_tx(mgr, txmsg);
3443
3444 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3445 if (ret > 0) {
3446 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3447 ret = -EIO;
3448 else
3449 ret = size;
3450 }
3451
3452 kfree(txmsg);
3453fail_put:
3454 drm_dp_mst_topology_put_mstb(mstb);
3455 return ret;
3456}
3457
3458static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
3459{
3460 struct drm_dp_sideband_msg_reply_body reply;
3461
3462 reply.reply_type = DP_SIDEBAND_REPLY_ACK;
3463 reply.req_type = req_type;
3464 drm_dp_encode_sideband_reply(&reply, msg);
3465 return 0;
3466}
3467
3468static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
3469 struct drm_dp_mst_branch *mstb,
3470 int req_type, int seqno, bool broadcast)
3471{
3472 struct drm_dp_sideband_msg_tx *txmsg;
3473
3474 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3475 if (!txmsg)
3476 return -ENOMEM;
3477
3478 txmsg->dst = mstb;
3479 txmsg->seqno = seqno;
3480 drm_dp_encode_up_ack_reply(txmsg, req_type);
3481
3482 mutex_lock(&mgr->qlock);
3483
3484 process_single_up_tx_qlock(mgr, txmsg);
3485
3486 mutex_unlock(&mgr->qlock);
3487
3488 kfree(txmsg);
3489 return 0;
3490}
3491
3492static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count)
3493{
3494	if (dp_link_bw == 0 || dp_link_count == 0) {
3495		DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
3496			      dp_link_bw, dp_link_count);
3497		return 0;
3498	}
3499
3500	return dp_link_bw * dp_link_count / 2;
3501}
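
/*
 * For example, DPCD link bandwidth code 0x14 (HBR2, 5.4 Gbps per lane)
 * with 4 lanes gives 0x14 * 4 / 2 = 40, i.e. each of the 64 MTP time
 * slots on such a link can carry 40 PBN.
 */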
3500
3501/**
3502 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
3503 * @mgr: manager to set state for
3504 * @mst_state: true to enable MST on this connector - false to disable.
3505 *
3506 * This is called by the driver when it detects an MST capable device plugged
3507 * into a DP MST capable port, or when a DP MST capable device is unplugged.
3508 */
3509int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
3510{
3511 int ret = 0;
3512 struct drm_dp_mst_branch *mstb = NULL;
3513
3514 mutex_lock(&mgr->payload_lock);
3515 mutex_lock(&mgr->lock);
3516 if (mst_state == mgr->mst_state)
3517 goto out_unlock;
3518
3519 mgr->mst_state = mst_state;
3520 /* set the device into MST mode */
3521 if (mst_state) {
3522 struct drm_dp_payload reset_pay;
3523
3524 WARN_ON(mgr->mst_primary);
3525
3526 /* get dpcd info */
3527 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
3528 if (ret != DP_RECEIVER_CAP_SIZE) {
3529 DRM_DEBUG_KMS("failed to read DPCD\n");
3530 goto out_unlock;
3531 }
3532
3533 mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1],
3534 mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
3535 if (mgr->pbn_div == 0) {
3536 ret = -EINVAL;
3537 goto out_unlock;
3538 }
3539
3540 /* add initial branch device at LCT 1 */
3541 mstb = drm_dp_add_mst_branch_device(1, NULL);
3542 if (mstb == NULL) {
3543 ret = -ENOMEM;
3544 goto out_unlock;
3545 }
3546 mstb->mgr = mgr;
3547
3548 /* give this the main reference */
3549 mgr->mst_primary = mstb;
3550 drm_dp_mst_topology_get_mstb(mgr->mst_primary);
3551
3552 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3553 DP_MST_EN |
3554 DP_UP_REQ_EN |
3555 DP_UPSTREAM_IS_SRC);
3556 if (ret < 0)
3557 goto out_unlock;
3558
3559 reset_pay.start_slot = 0;
3560 reset_pay.num_slots = 0x3f;
3561 drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
3562
3563 queue_work(system_long_wq, &mgr->work);
3564
3565 ret = 0;
3566 } else {
3567 /* disable MST on the device */
3568 mstb = mgr->mst_primary;
3569 mgr->mst_primary = NULL;
3570 /* this can fail if the device is gone */
3571 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
3572 ret = 0;
3573 memset(mgr->payloads, 0,
3574 mgr->max_payloads * sizeof(mgr->payloads[0]));
3575 memset(mgr->proposed_vcpis, 0,
3576 mgr->max_payloads * sizeof(mgr->proposed_vcpis[0]));
3577 mgr->payload_mask = 0;
3578 set_bit(0, &mgr->payload_mask);
3579 mgr->vcpi_mask = 0;
3580 mgr->payload_id_table_cleared = false;
3581 }
3582
3583out_unlock:
3584 mutex_unlock(&mgr->lock);
3585 mutex_unlock(&mgr->payload_lock);
3586 if (mstb)
3587 drm_dp_mst_topology_put_mstb(mstb);
3588 return ret;
3590}
3591EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
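
/*
 * Typical driver usage from the detect/hotplug path; the capability
 * check shown here is illustrative:
 *
 *	bool mst_capable = sink_supports_mst(); // hypothetical helper
 *
 *	if (mst_capable && !mgr->mst_state)
 *		ret = drm_dp_mst_topology_mgr_set_mst(mgr, true);
 *	else if (!mst_capable && mgr->mst_state)
 *		ret = drm_dp_mst_topology_mgr_set_mst(mgr, false);
 */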
3592
3593static void
3594drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
3595{
3596 struct drm_dp_mst_port *port;
3597
3598 /* The link address will need to be re-sent on resume */
3599 mstb->link_address_sent = false;
3600
3601 list_for_each_entry(port, &mstb->ports, next)
3602 if (port->mstb)
3603 drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
3604}
3605
3606/**
3607 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
3608 * @mgr: manager to suspend
3609 *
3610 * This function tells the MST device that we can't handle UP messages
3611 * anymore. This should stop it from sending any since we are suspended.
3612 */
3613void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
3614{
3615 mutex_lock(&mgr->lock);
3616 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3617 DP_MST_EN | DP_UPSTREAM_IS_SRC);
3618 mutex_unlock(&mgr->lock);
3619 flush_work(&mgr->up_req_work);
3620 flush_work(&mgr->work);
3621 flush_work(&mgr->delayed_destroy_work);
3622
3623 mutex_lock(&mgr->lock);
3624 if (mgr->mst_state && mgr->mst_primary)
3625 drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
3626 mutex_unlock(&mgr->lock);
3627}
3628EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
3629
3630/**
3631 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
3632 * @mgr: manager to resume
3633 * @sync: whether or not to perform topology reprobing synchronously
3634 *
3635 * This will fetch the DPCD and see if the device is still there;
3636 * if it is, it will rewrite the MSTM control bits and return.
3637 *
3638 * If the device fails this check, -1 is returned and the driver should
3639 * do a full MST reprobe, in case we were undocked.
3640 *
3641 * During system resume (where it is assumed that the driver will be calling
3642 * drm_atomic_helper_resume()) this function should be called beforehand with
3643 * @sync set to true. In contexts like runtime resume where the driver is not
3644 * expected to be calling drm_atomic_helper_resume(), this function should be
3645 * called with @sync set to false in order to avoid deadlocking.
3646 *
3647 * Returns: -1 if the MST topology was removed while we were suspended, 0
3648 * otherwise.
3649 */
3650int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
3651 bool sync)
3652{
3653 int ret;
3654 u8 guid[16];
3655
3656 mutex_lock(&mgr->lock);
3657 if (!mgr->mst_primary)
3658 goto out_fail;
3659
3660 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd,
3661 DP_RECEIVER_CAP_SIZE);
3662 if (ret != DP_RECEIVER_CAP_SIZE) {
3663 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
3664 goto out_fail;
3665 }
3666
3667 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3668 DP_MST_EN |
3669 DP_UP_REQ_EN |
3670 DP_UPSTREAM_IS_SRC);
3671 if (ret < 0) {
3672 DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
3673 goto out_fail;
3674 }
3675
3676 /* Some hubs forget their guids after they resume */
3677 ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
3678 if (ret != 16) {
3679 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
3680 goto out_fail;
3681 }
3682
3683 ret = drm_dp_check_mstb_guid(mgr->mst_primary, guid);
3684 if (ret) {
3685 DRM_DEBUG_KMS("check mstb failed - undocked during suspend?\n");
3686 goto out_fail;
3687 }
3688
3689 /*
3690 * For the final step of resuming the topology, we need to bring the
3691 * state of our in-memory topology back into sync with reality. So,
3692 * restart the probing process as if we're probing a new hub
3693 */
3694 queue_work(system_long_wq, &mgr->work);
3695 mutex_unlock(&mgr->lock);
3696
3697 if (sync) {
3698 DRM_DEBUG_KMS("Waiting for link probe work to finish re-syncing topology...\n");
3699 flush_work(&mgr->work);
3700 }
3701
3702 return 0;
3703
3704out_fail:
3705 mutex_unlock(&mgr->lock);
3706 return -1;
3707}
3708EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
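
/*
 * A sketch (not from this file) of wiring these helpers into a driver's PM
 * paths; the foo_* names are hypothetical, the drm_dp_mst_* and atomic
 * helper calls are the real API:
 *
 *	static int foo_suspend(struct foo_device *foo)
 *	{
 *		drm_dp_mst_topology_mgr_suspend(&foo->mst_mgr);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct foo_device *foo)
 *	{
 *		// sync=true, since we call drm_atomic_helper_resume() next
 *		if (drm_dp_mst_topology_mgr_resume(&foo->mst_mgr, true) < 0)
 *			foo_full_mst_reprobe(foo);
 *
 *		return drm_atomic_helper_resume(foo->dev, foo->suspend_state);
 *	}
 */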
3709
3710static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
3711{
3712 int len;
3713 u8 replyblock[32];
3714 int replylen, curreply;
3715 int ret;
3716 struct drm_dp_sideband_msg_rx *msg;
3717 int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
3718 msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
3719
3720 len = min(mgr->max_dpcd_transaction_bytes, 16);
3721 ret = drm_dp_dpcd_read(mgr->aux, basereg,
3722 replyblock, len);
3723 if (ret != len) {
3724 DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
3725 return false;
3726 }
3727 ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
3728 if (!ret) {
3729 DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
3730 return false;
3731 }
3732 replylen = msg->curchunk_len + msg->curchunk_hdrlen;
3733
3734 replylen -= len;
3735 curreply = len;
3736 while (replylen > 0) {
3737 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
3738 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
3739 replyblock, len);
3740 if (ret != len) {
3741 DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
3742 len, ret);
3743 return false;
3744 }
3745
3746 ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
3747 if (!ret) {
3748 DRM_DEBUG_KMS("failed to build sideband msg\n");
3749 return false;
3750 }
3751
3752 curreply += len;
3753 replylen -= len;
3754 }
3755 return true;
3756}
3757
3758static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
3759{
3760 struct drm_dp_sideband_msg_tx *txmsg;
3761 struct drm_dp_mst_branch *mstb;
3762 struct drm_dp_sideband_msg_hdr *hdr = &mgr->down_rep_recv.initial_hdr;
3763 int slot = -1;
3764
3765 if (!drm_dp_get_one_sb_msg(mgr, false))
3766 goto clear_down_rep_recv;
3767
3768 if (!mgr->down_rep_recv.have_eomt)
3769 return 0;
3770
3771 mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
3772 if (!mstb) {
3773 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
3774 hdr->lct);
3775 goto clear_down_rep_recv;
3776 }
3777
3778 /* find the message */
3779 slot = hdr->seqno;
3780 mutex_lock(&mgr->qlock);
3781 txmsg = mstb->tx_slots[slot];
3783 mutex_unlock(&mgr->qlock);
3784
3785 if (!txmsg) {
3786 DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
3787 mstb, hdr->seqno, hdr->lct, hdr->rad[0],
3788 mgr->down_rep_recv.msg[0]);
3789 goto no_msg;
3790 }
3791
3792 drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
3793
3794 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3795 DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
3796 txmsg->reply.req_type,
3797 drm_dp_mst_req_type_str(txmsg->reply.req_type),
3798 txmsg->reply.u.nak.reason,
3799 drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
3800 txmsg->reply.u.nak.nak_data);
3801
3802	memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3803
3804	mutex_lock(&mgr->qlock);
3805	txmsg->state = DRM_DP_SIDEBAND_TX_RX;
3806	mstb->tx_slots[slot] = NULL;
3807	mgr->is_waiting_for_dwn_reply = false;
3808	mutex_unlock(&mgr->qlock);
3809	drm_dp_mst_topology_put_mstb(mstb); /* done touching mstb */
3810
3811 wake_up_all(&mgr->tx_waitq);
3812
3813 return 0;
3814
3815no_msg:
3816 drm_dp_mst_topology_put_mstb(mstb);
3817clear_down_rep_recv:
3818 mutex_lock(&mgr->qlock);
3819 mgr->is_waiting_for_dwn_reply = false;
3820 mutex_unlock(&mgr->qlock);
3821 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3822
3823 return 0;
3824}
3825
3826static inline bool
3827drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
3828 struct drm_dp_pending_up_req *up_req)
3829{
3830 struct drm_dp_mst_branch *mstb = NULL;
3831 struct drm_dp_sideband_msg_req_body *msg = &up_req->msg;
3832 struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr;
3833 bool hotplug = false;
3834
3835 if (hdr->broadcast) {
3836 const u8 *guid = NULL;
3837
3838 if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY)
3839 guid = msg->u.conn_stat.guid;
3840 else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
3841 guid = msg->u.resource_stat.guid;
3842
3843 if (guid)
3844 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
3845 } else {
3846 mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
3847 }
3848
3849 if (!mstb) {
3850 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
3851 hdr->lct);
3852 return false;
3853 }
3854
3855 /* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
3856 if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
3857 drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
3858 hotplug = true;
3859 }
3860
3861 drm_dp_mst_topology_put_mstb(mstb);
3862 return hotplug;
3863}
3864
3865static void drm_dp_mst_up_req_work(struct work_struct *work)
3866{
3867 struct drm_dp_mst_topology_mgr *mgr =
3868 container_of(work, struct drm_dp_mst_topology_mgr,
3869 up_req_work);
3870 struct drm_dp_pending_up_req *up_req;
3871 bool send_hotplug = false;
3872
3873 mutex_lock(&mgr->probe_lock);
3874 while (true) {
3875 mutex_lock(&mgr->up_req_lock);
3876 up_req = list_first_entry_or_null(&mgr->up_req_list,
3877 struct drm_dp_pending_up_req,
3878 next);
3879 if (up_req)
3880 list_del(&up_req->next);
3881 mutex_unlock(&mgr->up_req_lock);
3882
3883 if (!up_req)
3884 break;
3885
3886 send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req);
3887 kfree(up_req);
3888 }
3889 mutex_unlock(&mgr->probe_lock);
3890
3891 if (send_hotplug)
3892 drm_kms_helper_hotplug_event(mgr->dev);
3893}
3894
3895static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
3896{
3897 struct drm_dp_sideband_msg_hdr *hdr = &mgr->up_req_recv.initial_hdr;
3898 struct drm_dp_pending_up_req *up_req;
3899 bool seqno;
3900
3901 if (!drm_dp_get_one_sb_msg(mgr, true))
3902 goto out;
3903
3904 if (!mgr->up_req_recv.have_eomt)
3905 return 0;
3906
3907 up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
3908 if (!up_req) {
3909 DRM_ERROR("Not enough memory to process MST up req\n");
3910 return -ENOMEM;
3911 }
3912 INIT_LIST_HEAD(&up_req->next);
3913
3914 seqno = hdr->seqno;
3915 drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg);
3916
3917 if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
3918 up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) {
3919 DRM_DEBUG_KMS("Received unknown up req type, ignoring: %x\n",
3920 up_req->msg.req_type);
3921 kfree(up_req);
3922 goto out;
3923 }
3924
3925 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
3926 seqno, false);
3927
3928 if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
3929 const struct drm_dp_connection_status_notify *conn_stat =
3930 &up_req->msg.u.conn_stat;
3931
3932 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
3933 conn_stat->port_number,
3934 conn_stat->legacy_device_plug_status,
3935 conn_stat->displayport_device_plug_status,
3936 conn_stat->message_capability_status,
3937 conn_stat->input_port,
3938 conn_stat->peer_device_type);
3939 } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
3940 const struct drm_dp_resource_status_notify *res_stat =
3941 &up_req->msg.u.resource_stat;
3942
3943 DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n",
3944 res_stat->port_number,
3945 res_stat->available_pbn);
3946 }
3947
3948 up_req->hdr = *hdr;
3949 mutex_lock(&mgr->up_req_lock);
3950 list_add_tail(&up_req->next, &mgr->up_req_list);
3951 mutex_unlock(&mgr->up_req_lock);
3952 queue_work(system_long_wq, &mgr->up_req_work);
3953
3954out:
3955 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3956 return 0;
3957}
3958
3959/**
3960 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
3961 * @mgr: manager to notify irq for.
3962 * @esi: 4 bytes from SINK_COUNT_ESI
3963 * @handled: whether the hpd interrupt was consumed or not
3964 *
3965 * This should be called from the driver when it detects a short IRQ,
3966 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
3967 * topology manager will process the sideband messages received as a result
3968 * of this.
3969 */
3970int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
3971{
3972 int ret = 0;
3973 int sc;
3974 *handled = false;
3975 sc = esi[0] & 0x3f;
3976
3977 if (sc != mgr->sink_count) {
3978 mgr->sink_count = sc;
3979 *handled = true;
3980 }
3981
3982 if (esi[1] & DP_DOWN_REP_MSG_RDY) {
3983 ret = drm_dp_mst_handle_down_rep(mgr);
3984 *handled = true;
3985 }
3986
3987 if (esi[1] & DP_UP_REQ_MSG_RDY) {
3988 ret |= drm_dp_mst_handle_up_req(mgr);
3989 *handled = true;
3990 }
3991
3992 drm_dp_mst_kick_tx(mgr);
3993 return ret;
3994}
3995EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
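
/*
 * An illustrative short-pulse handler, loosely modelled on what drivers do
 * in practice; error handling and the retry loop drivers usually wrap
 * around this are elided. The DPCD offsets and drm_dp_* calls are real:
 *
 *	u8 esi[DP_DPRX_ESI_LEN] = {};
 *	bool handled;
 *
 *	drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI, esi, DP_DPRX_ESI_LEN);
 *	drm_dp_mst_hpd_irq(mgr, esi, &handled);
 *	if (handled)
 *		// ack the events we consumed back to the sink
 *		drm_dp_dpcd_write(mgr->aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
 */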
3996
3997/**
3998 * drm_dp_mst_detect_port() - get connection status for an MST port
3999 * @connector: DRM connector for this port
4000 * @ctx: The acquisition context to use for grabbing locks
4001 * @mgr: manager for this port
4002 * @port: pointer to a port
4003 *
4004 * This returns the current connection state for a port.
4005 */
4006int
4007drm_dp_mst_detect_port(struct drm_connector *connector,
4008 struct drm_modeset_acquire_ctx *ctx,
4009 struct drm_dp_mst_topology_mgr *mgr,
4010 struct drm_dp_mst_port *port)
4011{
4012 int ret;
4013
4014 /* we need to search for the port in the mgr in case it's gone */
4015 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4016 if (!port)
4017 return connector_status_disconnected;
4018
4019 ret = drm_modeset_lock(&mgr->base.lock, ctx);
4020 if (ret)
4021 goto out;
4022
4023 ret = connector_status_disconnected;
4024
4025 if (!port->ddps)
4026 goto out;
4027
4028 switch (port->pdt) {
4029 case DP_PEER_DEVICE_NONE:
4030 case DP_PEER_DEVICE_MST_BRANCHING:
4031 if (!port->mcs)
4032 ret = connector_status_connected;
4033 break;
4034
4035 case DP_PEER_DEVICE_SST_SINK:
4036 ret = connector_status_connected;
4037 /* for logical ports - cache the EDID */
4038		if (port->port_num >= 8 && !port->cached_edid)
4039			port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
4041 break;
4042 case DP_PEER_DEVICE_DP_LEGACY_CONV:
4043 if (port->ldps)
4044 ret = connector_status_connected;
4045 break;
4046 }
4047out:
4048 drm_dp_mst_topology_put_port(port);
4049 return ret;
4050}
4051EXPORT_SYMBOL(drm_dp_mst_detect_port);
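
/*
 * Typical use is from a connector's &drm_connector_helper_funcs.detect_ctx
 * hook; a minimal sketch, assuming the driver keeps the manager and port in
 * a hypothetical foo_connector wrapper:
 *
 *	static int foo_detect_ctx(struct drm_connector *connector,
 *				  struct drm_modeset_acquire_ctx *ctx,
 *				  bool force)
 *	{
 *		struct foo_connector *foo = to_foo_connector(connector);
 *
 *		return drm_dp_mst_detect_port(connector, ctx, foo->mst_mgr,
 *					      foo->mst_port);
 *	}
 *
 * Note that -EDEADLK from the modeset lock is propagated to the caller,
 * as the probe helpers expect.
 */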
4052
4053/**
4054 * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not
4055 * @mgr: manager for this port
4056 * @port: unverified pointer to a port.
4057 *
4058 * This returns whether the port supports audio or not.
4059 */
4060bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
4061 struct drm_dp_mst_port *port)
4062{
4063 bool ret = false;
4064
4065 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4066 if (!port)
4067 return ret;
4068 ret = port->has_audio;
4069 drm_dp_mst_topology_put_port(port);
4070 return ret;
4071}
4072EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
4073
4074/**
4075 * drm_dp_mst_get_edid() - get EDID for an MST port
4076 * @connector: toplevel connector to get EDID for
4077 * @mgr: manager for this port
4078 * @port: unverified pointer to a port.
4079 *
4080 * This returns an EDID for the port connected to a connector. It
4081 * validates that the pointer still exists, so the caller doesn't need
4082 * to hold a reference.
4083 */
4084struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4085{
4086 struct edid *edid = NULL;
4087
4088 /* we need to search for the port in the mgr in case it's gone */
4089 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4090 if (!port)
4091 return NULL;
4092
4093 if (port->cached_edid)
4094 edid = drm_edid_duplicate(port->cached_edid);
4095	else
4096		edid = drm_get_edid(connector, &port->aux.ddc);
4098 port->has_audio = drm_detect_monitor_audio(edid);
4099 drm_dp_mst_topology_put_port(port);
4100 return edid;
4101}
4102EXPORT_SYMBOL(drm_dp_mst_get_edid);
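
/*
 * A minimal sketch of the expected use from
 * &drm_connector_helper_funcs.get_modes (the foo_* fields are hypothetical):
 *
 *	struct edid *edid;
 *	int count;
 *
 *	edid = drm_dp_mst_get_edid(connector, foo->mst_mgr, foo->mst_port);
 *	drm_connector_update_edid_property(connector, edid);
 *	count = drm_add_edid_modes(connector, edid);
 *	kfree(edid);
 *	return count;
 *
 * The returned EDID is a copy owned by the caller and must be freed.
 */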
4103
4104/**
4105 * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
4106 * @mgr: manager to use
4107 * @pbn: payload bandwidth to convert into slots.
4108 *
4109 * Calculate the number of VCPI slots that will be required for the given PBN
4110 * value. This function is deprecated, and should not be used in atomic
4111 * drivers.
4112 *
4113 * RETURNS:
4114 * The total slots required for this port, or error.
4115 */
4116int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
4117 int pbn)
4118{
4119 int num_slots;
4120
4121 num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
4122
4123 /* max. time slots - one slot for MTP header */
4124 if (num_slots > 63)
4125 return -ENOSPC;
4126 return num_slots;
4127}
4128EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
4129
4130static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4131 struct drm_dp_vcpi *vcpi, int pbn, int slots)
4132{
4133 int ret;
4134
4135 /* max. time slots - one slot for MTP header */
4136 if (slots > 63)
4137 return -ENOSPC;
4138
4139 vcpi->pbn = pbn;
4140 vcpi->aligned_pbn = slots * mgr->pbn_div;
4141 vcpi->num_slots = slots;
4142
4143 ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
4144 if (ret < 0)
4145 return ret;
4146 return 0;
4147}
4148
4149/**
4150 * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
4151 * @state: global atomic state
4152 * @mgr: MST topology manager for the port
4153 * @port: port to find vcpi slots for
4154 * @pbn: bandwidth required for the mode in PBN
4155 * @pbn_div: divider for DSC mode that takes FEC into account
4156 *
4157 * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
4158 * may have had. Any atomic drivers which support MST must call this function
4159 * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
4160 * current VCPI allocation for the new state, but only when
4161 * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set
4162 * to ensure compatibility with userspace applications that still use the
4163 * legacy modesetting UAPI.
4164 *
4165 * Allocations set by this function are not checked against the bandwidth
4166 * constraints of @mgr until the driver calls drm_dp_mst_atomic_check().
4167 *
4168 * Additionally, it is OK to call this function multiple times on the same
4169 * @port as needed. It is not OK, however, to call this function and
4170 * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase.
4171 *
4172 * See also:
4173 * drm_dp_atomic_release_vcpi_slots()
4174 * drm_dp_mst_atomic_check()
4175 *
4176 * Returns:
4177 * Total slots in the atomic state assigned for this port, or a negative error
4178 * code if the port no longer exists
4179 */
4180int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
4181 struct drm_dp_mst_topology_mgr *mgr,
4182 struct drm_dp_mst_port *port, int pbn,
4183 int pbn_div)
4184{
4185 struct drm_dp_mst_topology_state *topology_state;
4186 struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
4187 int prev_slots, prev_bw, req_slots;
4188
4189 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
4190 if (IS_ERR(topology_state))
4191 return PTR_ERR(topology_state);
4192
4193 /* Find the current allocation for this port, if any */
4194 list_for_each_entry(pos, &topology_state->vcpis, next) {
4195 if (pos->port == port) {
4196 vcpi = pos;
4197 prev_slots = vcpi->vcpi;
4198 prev_bw = vcpi->pbn;
4199
4200 /*
4201 * This should never happen, unless the driver tries
4202 * releasing and allocating the same VCPI allocation,
4203 * which is an error
4204 */
4205 if (WARN_ON(!prev_slots)) {
4206 DRM_ERROR("cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
4207 port);
4208 return -EINVAL;
4209 }
4210
4211 break;
4212 }
4213 }
4214 if (!vcpi) {
4215 prev_slots = 0;
4216 prev_bw = 0;
4217 }
4218
4219 if (pbn_div <= 0)
4220 pbn_div = mgr->pbn_div;
4221
4222 req_slots = DIV_ROUND_UP(pbn, pbn_div);
4223
4224 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
4225 port->connector->base.id, port->connector->name,
4226 port, prev_slots, req_slots);
4227 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n",
4228 port->connector->base.id, port->connector->name,
4229 port, prev_bw, pbn);
4230
4231 /* Add the new allocation to the state */
4232 if (!vcpi) {
4233 vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
4234 if (!vcpi)
4235 return -ENOMEM;
4236
4237 drm_dp_mst_get_port_malloc(port);
4238 vcpi->port = port;
4239 list_add(&vcpi->next, &topology_state->vcpis);
4240 }
4241 vcpi->vcpi = req_slots;
4242 vcpi->pbn = pbn;
4243
4244 return req_slots;
4245}
4246EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
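
/*
 * A condensed sketch of the expected flow in an encoder's atomic_check
 * (foo_* names hypothetical; 24 bpp assumed for brevity). The PBN is
 * derived from the adjusted mode, and the slot count is stored in the MST
 * topology state for drm_dp_mst_atomic_check() to verify later:
 *
 *	static int foo_encoder_atomic_check(struct drm_encoder *encoder,
 *					    struct drm_crtc_state *crtc_state,
 *					    struct drm_connector_state *conn_state)
 *	{
 *		struct foo_connector *foo = to_foo_connector(conn_state->connector);
 *		int pbn, slots;
 *
 *		if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
 *			return 0;
 *
 *		pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.crtc_clock,
 *					   24, false);
 *		// pbn_div = 0 makes the helper fall back to mgr->pbn_div
 *		slots = drm_dp_atomic_find_vcpi_slots(crtc_state->state,
 *						      foo->mst_mgr,
 *						      foo->mst_port, pbn, 0);
 *		return slots < 0 ? slots : 0;
 *	}
 */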
4247
4248/**
4249 * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
4250 * @state: global atomic state
4251 * @mgr: MST topology manager for the port
4252 * @port: The port to release the VCPI slots from
4253 *
4254 * Releases any VCPI slots that have been allocated to a port in the atomic
4255 * state. Any atomic drivers which support MST must call this function in
4256 * their &drm_connector_helper_funcs.atomic_check() callback when the
4257 * connector will no longer have VCPI allocated (e.g. because its CRTC was
4258 * removed) when it had VCPI allocated in the previous atomic state.
4259 *
4260 * It is OK to call this even if @port has been removed from the system.
4261 * Additionally, it is OK to call this function multiple times on the same
4262 * @port as needed. It is not OK, however, to call this function and
4263 * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check
4264 * phase.
4265 *
4266 * See also:
4267 * drm_dp_atomic_find_vcpi_slots()
4268 * drm_dp_mst_atomic_check()
4269 *
4270 * Returns:
4271 * 0 if all slots for this port were added back to
4272 * &drm_dp_mst_topology_state.avail_slots or negative error code
4273 */
4274int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
4275 struct drm_dp_mst_topology_mgr *mgr,
4276 struct drm_dp_mst_port *port)
4277{
4278 struct drm_dp_mst_topology_state *topology_state;
4279 struct drm_dp_vcpi_allocation *pos;
4280 bool found = false;
4281
4282 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
4283 if (IS_ERR(topology_state))
4284 return PTR_ERR(topology_state);
4285
4286 list_for_each_entry(pos, &topology_state->vcpis, next) {
4287 if (pos->port == port) {
4288 found = true;
4289 break;
4290 }
4291 }
4292 if (WARN_ON(!found)) {
4293 DRM_ERROR("no VCPI for [MST PORT:%p] found in mst state %p\n",
4294 port, &topology_state->base);
4295 return -EINVAL;
4296 }
4297
4298 DRM_DEBUG_ATOMIC("[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
4299 if (pos->vcpi) {
4300 drm_dp_mst_put_port_malloc(port);
4301 pos->vcpi = 0;
4302 pos->pbn = 0;
4303 }
4304
4305 return 0;
4306}
4307EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);
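
/*
 * The release side, sketched for a connector's atomic_check (foo_* names
 * hypothetical): when the connector is losing its CRTC, hand the slots
 * back to the topology state.
 *
 *	if (old_conn_state->crtc && !new_conn_state->crtc)
 *		return drm_dp_atomic_release_vcpi_slots(state, foo->mst_mgr,
 *							foo->mst_port);
 */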
4308
4309/**
4310 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
4311 * @mgr: manager for this port
4312 * @port: port to allocate a virtual channel for.
4313 * @pbn: payload bandwidth number to request
4314 * @slots: returned number of slots for this PBN.
4315 */
4316bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4317 struct drm_dp_mst_port *port, int pbn, int slots)
4318{
4319 int ret;
4320
4321 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4322 if (!port)
4323 return false;
4324
4325 if (slots < 0)
4326 return false;
4327
4328 if (port->vcpi.vcpi > 0) {
4329 DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
4330 port->vcpi.vcpi, port->vcpi.pbn, pbn);
4331 if (pbn == port->vcpi.pbn) {
4332 drm_dp_mst_topology_put_port(port);
4333 return true;
4334 }
4335 }
4336
4337 ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
4338 if (ret) {
4339 DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
4340 DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
4341 goto out;
4342 }
4343 DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
4344 pbn, port->vcpi.num_slots);
4345
4346 /* Keep port allocated until its payload has been removed */
4347 drm_dp_mst_get_port_malloc(port);
4348 drm_dp_mst_topology_put_port(port);
4349 return true;
4350out:
4351 return false;
4352}
4353EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
4354
4355int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4356{
4357 int slots = 0;
4358 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4359 if (!port)
4360 return slots;
4361
4362 slots = port->vcpi.num_slots;
4363 drm_dp_mst_topology_put_port(port);
4364 return slots;
4365}
4366EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
4367
4368/**
4369 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
4370 * @mgr: manager for this port
4371 * @port: unverified pointer to a port.
4372 *
4373 * This just resets the number of slots for the port's VCPI for later programming.
4374 */
4375void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4376{
4377 /*
4378 * A port with VCPI will remain allocated until its VCPI is
4379 * released, no verified ref needed
4380 */
4381
4382 port->vcpi.num_slots = 0;
4383}
4384EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
4385
4386/**
4387 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
4388 * @mgr: manager for this port
4389 * @port: port to deallocate vcpi for
4390 *
4391 * This can be called unconditionally, regardless of whether
4392 * drm_dp_mst_allocate_vcpi() succeeded or not.
4393 */
4394void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4395 struct drm_dp_mst_port *port)
4396{
4397 if (!port->vcpi.vcpi)
4398 return;
4399
4400 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
4401 port->vcpi.num_slots = 0;
4402 port->vcpi.pbn = 0;
4403 port->vcpi.aligned_pbn = 0;
4404 port->vcpi.vcpi = 0;
4405 drm_dp_mst_put_port_malloc(port);
4406}
4407EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
4408
4409static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
4410 int id, struct drm_dp_payload *payload)
4411{
4412 u8 payload_alloc[3], status;
4413 int ret;
4414 int retries = 0;
4415
4416 drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
4417 DP_PAYLOAD_TABLE_UPDATED);
4418
4419 payload_alloc[0] = id;
4420 payload_alloc[1] = payload->start_slot;
4421 payload_alloc[2] = payload->num_slots;
4422
4423 ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
4424 if (ret != 3) {
4425 DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
4426 goto fail;
4427 }
4428
4429retry:
4430 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
4431 if (ret < 0) {
4432 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
4433 goto fail;
4434 }
4435
4436 if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
4437 retries++;
4438 if (retries < 20) {
4439 usleep_range(10000, 20000);
4440 goto retry;
4441 }
4442 DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
4443 ret = -EINVAL;
4444 goto fail;
4445 }
4446 ret = 0;
4447fail:
4448 return ret;
4449}
4450
4451
4452/**
4453 * drm_dp_check_act_status() - Check ACT handled status.
4454 * @mgr: manager to use
4455 *
4456 * Check the payload status bits in the DPCD for ACT handled completion.
4457 */
4458int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
4459{
4460 u8 status;
4461 int ret;
4462 int count = 0;
4463
4464 do {
4465 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
4466
4467 if (ret < 0) {
4468 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
4469 goto fail;
4470 }
4471
4472 if (status & DP_PAYLOAD_ACT_HANDLED)
4473 break;
4474 count++;
4475 udelay(100);
4476
4477 } while (count < 30);
4478
4479 if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
4480 DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
4481 ret = -EINVAL;
4482 goto fail;
4483 }
4484 return 0;
4485fail:
4486 return ret;
4487}
4488EXPORT_SYMBOL(drm_dp_check_act_status);
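
/*
 * In the legacy payload flow this is the middle step of enabling a stream;
 * a rough sketch of the order drivers typically use (error handling
 * elided, the foo_* step is hypothetical):
 *
 *	drm_dp_mst_allocate_vcpi(mgr, port, pbn, slots);
 *	drm_dp_update_payload_part1(mgr);
 *	foo_enable_stream(...);		// hw enables the stream, ACT triggers
 *	drm_dp_check_act_status(mgr);
 *	drm_dp_update_payload_part2(mgr);
 */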
4489
4490/**
4491 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
4492 * @clock: dot clock for the mode
4493 * @bpp: bpp for the mode.
4494 * @dsc: DSC mode. If true, bpp has units of 1/16 of a bit per pixel
4495 *
4496 * This uses the formula in the spec to calculate the PBN value for a mode.
4497 */
4498int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc)
4499{
4500 /*
4501	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
4502	 * The unit of 54/64MBytes/sec is an arbitrary unit chosen as a common
4503	 * multiplier so that the PBN comes out as an integer for all link
4504	 * rate/lane count combinations. The calculation is:
4505	 * peak_kbps = clock * bpp           (kHz * bits per pixel)
4506	 * peak_kbps *= 1006/1000            add the 0.6% margin
4507	 * peak_kBps = peak_kbps / 8         convert bits to bytes
4508	 * pbn = peak_kBps / (54/64 * 1000)  express in units of 54/64MBytes/sec
4509 *
4510 * If the bpp is in units of 1/16, further divide by 16. Put this
4511 * factor in the numerator rather than the denominator to avoid
4512 * integer overflow
4513 */
4514
4515 if (dsc)
4516 return DIV_ROUND_UP_ULL(mul_u32_u32(clock * (bpp / 16), 64 * 1006),
4517 8 * 54 * 1000 * 1000);
4518
4519 return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
4520 8 * 54 * 1000 * 1000);
4521}
4522EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
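
/*
 * Worked example: a 1920x1080@60 mode has a 148.5 MHz dot clock, so at
 * 24 bpp (dsc = false):
 *
 *	PBN = DIV_ROUND_UP(148500 * 24 * 64 * 1006, 8 * 54 * 1000 * 1000)
 *	    = DIV_ROUND_UP(229464576000, 432000000)
 *	    = 532
 *
 * i.e. drm_dp_calc_pbn_mode(148500, 24, false) == 532.
 */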
4523
4524/* we want to kick the TX after we've ack the up/down IRQs. */
4525static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
4526{
4527 queue_work(system_long_wq, &mgr->tx_work);
4528}
4529
4530static void drm_dp_mst_dump_mstb(struct seq_file *m,
4531 struct drm_dp_mst_branch *mstb)
4532{
4533 struct drm_dp_mst_port *port;
4534 int tabs = mstb->lct;
4535 char prefix[10];
4536 int i;
4537
4538 for (i = 0; i < tabs; i++)
4539 prefix[i] = '\t';
4540 prefix[i] = '\0';
4541
4542 seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
4543 list_for_each_entry(port, &mstb->ports, next) {
4544 seq_printf(m, "%sport: %d: input: %d: pdt: %d, ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n", prefix, port->port_num, port->input, port->pdt, port->ddps, port->ldps, port->num_sdp_streams, port->num_sdp_stream_sinks, port, port->connector);
4545 if (port->mstb)
4546 drm_dp_mst_dump_mstb(m, port->mstb);
4547 }
4548}
4549
4550#define DP_PAYLOAD_TABLE_SIZE 64
4551
4552static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
4553 char *buf)
4554{
4555 int i;
4556
4557 for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
4558 if (drm_dp_dpcd_read(mgr->aux,
4559 DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
4560 &buf[i], 16) != 16)
4561 return false;
4562 }
4563 return true;
4564}
4565
4566static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
4567 struct drm_dp_mst_port *port, char *name,
4568 int namelen)
4569{
4570 struct edid *mst_edid;
4571
4572 mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
4573 drm_edid_get_monitor_name(mst_edid, name, namelen);
4574}
4575
4576/**
4577 * drm_dp_mst_dump_topology(): dump topology to seq file.
4578 * @m: seq_file to dump output to
4579 * @mgr: manager to dump current topology for.
4580 *
4581 * helper to dump MST topology to a seq file for debugfs.
4582 */
4583void drm_dp_mst_dump_topology(struct seq_file *m,
4584 struct drm_dp_mst_topology_mgr *mgr)
4585{
4586 int i;
4587 struct drm_dp_mst_port *port;
4588
4589 mutex_lock(&mgr->lock);
4590 if (mgr->mst_primary)
4591 drm_dp_mst_dump_mstb(m, mgr->mst_primary);
4592
4593 /* dump VCPIs */
4594 mutex_unlock(&mgr->lock);
4595
4596 mutex_lock(&mgr->payload_lock);
4597 seq_printf(m, "vcpi: %lx %lx %d\n", mgr->payload_mask, mgr->vcpi_mask,
4598 mgr->max_payloads);
4599
4600 for (i = 0; i < mgr->max_payloads; i++) {
4601 if (mgr->proposed_vcpis[i]) {
4602 char name[14];
4603
4604 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
4605 fetch_monitor_name(mgr, port, name, sizeof(name));
4606 seq_printf(m, "vcpi %d: %d %d %d sink name: %s\n", i,
4607 port->port_num, port->vcpi.vcpi,
4608 port->vcpi.num_slots,
4609 (*name != 0) ? name : "Unknown");
4610 } else
4611 seq_printf(m, "vcpi %d:unused\n", i);
4612 }
4613 for (i = 0; i < mgr->max_payloads; i++) {
4614 seq_printf(m, "payload %d: %d, %d, %d\n",
4615 i,
4616 mgr->payloads[i].payload_state,
4617 mgr->payloads[i].start_slot,
4618 mgr->payloads[i].num_slots);
4621	}
4622 mutex_unlock(&mgr->payload_lock);
4623
4624 mutex_lock(&mgr->lock);
4625 if (mgr->mst_primary) {
4626 u8 buf[DP_PAYLOAD_TABLE_SIZE];
4627 int ret;
4628
4629		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
4630		if (ret != DP_RECEIVER_CAP_SIZE) {
4631			seq_printf(m, "dpcd read failed\n");
4632			goto out;
4633		}
4634		seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
4635
4636		ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
4637		if (ret != 2) {
4638			seq_printf(m, "faux/mst read failed\n");
4639			goto out;
4640		}
4641		seq_printf(m, "faux/mst: %*ph\n", 2, buf);
4642
4643		ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
4644		if (ret != 1) {
4645			seq_printf(m, "mst ctrl read failed\n");
4646			goto out;
4647		}
4648		seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
4649
4650		/* dump the standard OUI branch header */
4651		ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
4652		if (ret != DP_BRANCH_OUI_HEADER_SIZE) {
4653			seq_printf(m, "branch oui read failed\n");
4654			goto out;
4655		}
4656 seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
4657
4658 for (i = 0x3; i < 0x8 && buf[i]; i++)
4659 seq_printf(m, "%c", buf[i]);
4660 seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
4661 buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
4662 if (dump_dp_payload_table(mgr, buf))
4663 seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
4664 }
4665
4666out:
4667 mutex_unlock(&mgr->lock);
4669}
4670EXPORT_SYMBOL(drm_dp_mst_dump_topology);
4671
4672static void drm_dp_tx_work(struct work_struct *work)
4673{
4674 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
4675
4676 mutex_lock(&mgr->qlock);
4677 if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply)
4678 process_single_down_tx_qlock(mgr);
4679 mutex_unlock(&mgr->qlock);
4680}
4681
4682static inline void drm_dp_destroy_connector(struct drm_dp_mst_port *port)
4683{
4684 if (!port->connector)
4685 return;
4686
4687 if (port->mgr->cbs->destroy_connector) {
4688 port->mgr->cbs->destroy_connector(port->mgr, port->connector);
4689 } else {
4690 drm_connector_unregister(port->connector);
4691 drm_connector_put(port->connector);
4692 }
4693}
4694
4695static inline void
4696drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
4697{
4698 drm_dp_destroy_connector(port);
4699
4700 drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
4701 drm_dp_mst_put_port_malloc(port);
4702}
4703
4704static inline void
4705drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb)
4706{
4707 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
4708 struct drm_dp_mst_port *port, *tmp;
4709 bool wake_tx = false;
4710
4711 mutex_lock(&mgr->lock);
4712 list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
4713 list_del(&port->next);
4714 drm_dp_mst_topology_put_port(port);
4715 }
4716 mutex_unlock(&mgr->lock);
4717
4718 /* drop any tx slots msg */
4719 mutex_lock(&mstb->mgr->qlock);
4720 if (mstb->tx_slots[0]) {
4721 mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
4722 mstb->tx_slots[0] = NULL;
4723 wake_tx = true;
4724 }
4725 if (mstb->tx_slots[1]) {
4726 mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
4727 mstb->tx_slots[1] = NULL;
4728 wake_tx = true;
4729 }
4730 mutex_unlock(&mstb->mgr->qlock);
4731
4732 if (wake_tx)
4733 wake_up_all(&mstb->mgr->tx_waitq);
4734
4735 drm_dp_mst_put_mstb_malloc(mstb);
4736}
4737
4738static void drm_dp_delayed_destroy_work(struct work_struct *work)
4739{
4740 struct drm_dp_mst_topology_mgr *mgr =
4741 container_of(work, struct drm_dp_mst_topology_mgr,
4742 delayed_destroy_work);
4743 bool send_hotplug = false, go_again;
4744
4745 /*
4746 * Not a regular list traverse as we have to drop the destroy
4747 * connector lock before destroying the mstb/port, to avoid AB->BA
4748 * ordering between this lock and the config mutex.
4749 */
4750 do {
4751 go_again = false;
4752
4753 for (;;) {
4754 struct drm_dp_mst_branch *mstb;
4755
4756 mutex_lock(&mgr->delayed_destroy_lock);
4757 mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list,
4758 struct drm_dp_mst_branch,
4759 destroy_next);
4760 if (mstb)
4761 list_del(&mstb->destroy_next);
4762 mutex_unlock(&mgr->delayed_destroy_lock);
4763
4764 if (!mstb)
4765 break;
4766
4767 drm_dp_delayed_destroy_mstb(mstb);
4768 go_again = true;
4769 }
4770
4771 for (;;) {
4772 struct drm_dp_mst_port *port;
4773
4774 mutex_lock(&mgr->delayed_destroy_lock);
4775 port = list_first_entry_or_null(&mgr->destroy_port_list,
4776 struct drm_dp_mst_port,
4777 next);
4778 if (port)
4779 list_del(&port->next);
4780 mutex_unlock(&mgr->delayed_destroy_lock);
4781
4782 if (!port)
4783 break;
4784
4785 drm_dp_delayed_destroy_port(port);
4786 send_hotplug = true;
4787 go_again = true;
4788 }
4789 } while (go_again);
4790
4791 if (send_hotplug)
4792 drm_kms_helper_hotplug_event(mgr->dev);
4793}
4794
4795static struct drm_private_state *
4796drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
4797{
4798 struct drm_dp_mst_topology_state *state, *old_state =
4799 to_dp_mst_topology_state(obj->state);
4800 struct drm_dp_vcpi_allocation *pos, *vcpi;
4801
4802 state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
4803 if (!state)
4804 return NULL;
4805
4806 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
4807
4808 INIT_LIST_HEAD(&state->vcpis);
4809
4810 list_for_each_entry(pos, &old_state->vcpis, next) {
4811 /* Prune leftover freed VCPI allocations */
4812 if (!pos->vcpi)
4813 continue;
4814
4815 vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL);
4816 if (!vcpi)
4817 goto fail;
4818
4819 drm_dp_mst_get_port_malloc(vcpi->port);
4820 list_add(&vcpi->next, &state->vcpis);
4821 }
4822
4823 return &state->base;
4824
4825fail:
4826 list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) {
4827 drm_dp_mst_put_port_malloc(pos->port);
4828 kfree(pos);
4829 }
4830 kfree(state);
4831
4832 return NULL;
4833}
4834
4835static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
4836 struct drm_private_state *state)
4837{
4838 struct drm_dp_mst_topology_state *mst_state =
4839 to_dp_mst_topology_state(state);
4840 struct drm_dp_vcpi_allocation *pos, *tmp;
4841
4842 list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) {
4843 /* We only keep references to ports with non-zero VCPIs */
4844 if (pos->vcpi)
4845 drm_dp_mst_put_port_malloc(pos->port);
4846 kfree(pos);
4847 }
4848
4849 kfree(mst_state);
4850}
4851
4852static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
4853 struct drm_dp_mst_branch *branch)
4854{
4855 while (port->parent) {
4856 if (port->parent == branch)
4857 return true;
4858
4859 if (port->parent->port_parent)
4860 port = port->parent->port_parent;
4861 else
4862 break;
4863 }
4864 return false;
4865}
4866
4867static int
4868drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
4869 struct drm_dp_mst_topology_state *state);
4870
4871static int
4872drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
4873 struct drm_dp_mst_topology_state *state)
4874{
4875 struct drm_dp_vcpi_allocation *vcpi;
4876 struct drm_dp_mst_port *port;
4877 int pbn_used = 0, ret;
4878 bool found = false;
4879
4880 /* Check that we have at least one port in our state that's downstream
4881 * of this branch, otherwise we can skip this branch
4882 */
4883 list_for_each_entry(vcpi, &state->vcpis, next) {
4884 if (!vcpi->pbn ||
4885 !drm_dp_mst_port_downstream_of_branch(vcpi->port, mstb))
4886 continue;
4887
4888 found = true;
4889 break;
4890 }
4891 if (!found)
4892 return 0;
4893
4894 if (mstb->port_parent)
4895 DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] Checking bandwidth limits on [MSTB:%p]\n",
4896 mstb->port_parent->parent, mstb->port_parent,
4897 mstb);
4898 else
4899 DRM_DEBUG_ATOMIC("[MSTB:%p] Checking bandwidth limits\n",
4900 mstb);
4901
4902 list_for_each_entry(port, &mstb->ports, next) {
4903 ret = drm_dp_mst_atomic_check_port_bw_limit(port, state);
4904 if (ret < 0)
4905 return ret;
4906
4907 pbn_used += ret;
4908 }
4909
4910 return pbn_used;
4911}
4912
4913static int
4914drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
4915 struct drm_dp_mst_topology_state *state)
4916{
4917 struct drm_dp_vcpi_allocation *vcpi;
4918 int pbn_used = 0;
4919
4920 if (port->pdt == DP_PEER_DEVICE_NONE)
4921 return 0;
4922
4923 if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
4924 bool found = false;
4925
4926 list_for_each_entry(vcpi, &state->vcpis, next) {
4927 if (vcpi->port != port)
4928 continue;
4929 if (!vcpi->pbn)
4930 return 0;
4931
4932 found = true;
4933 break;
4934 }
4935 if (!found)
4936 return 0;
4937
4938 /* This should never happen, as it means we tried to
4939 * set a mode before querying the full_pbn
4940 */
4941 if (WARN_ON(!port->full_pbn))
4942 return -EINVAL;
4943
4944 pbn_used = vcpi->pbn;
4945 } else {
4946 pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb,
4947 state);
4948 if (pbn_used <= 0)
4949 return pbn_used;
4950 }
4951
4952 if (pbn_used > port->full_pbn) {
4953 DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n",
4954 port->parent, port, pbn_used,
4955 port->full_pbn);
4956 return -ENOSPC;
4957 }
4958
4959 DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n",
4960 port->parent, port, pbn_used, port->full_pbn);
4961
4962 return pbn_used;
4963}
4964
4965static inline int
4966drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr,
4967 struct drm_dp_mst_topology_state *mst_state)
4968{
4969 struct drm_dp_vcpi_allocation *vcpi;
4970 int avail_slots = 63, payload_count = 0;
4971
4972 list_for_each_entry(vcpi, &mst_state->vcpis, next) {
4973		/* Releasing VCPI is always OK - even if the port is gone */
4974 if (!vcpi->vcpi) {
4975 DRM_DEBUG_ATOMIC("[MST PORT:%p] releases all VCPI slots\n",
4976 vcpi->port);
4977 continue;
4978 }
4979
4980 DRM_DEBUG_ATOMIC("[MST PORT:%p] requires %d vcpi slots\n",
4981 vcpi->port, vcpi->vcpi);
4982
4983 avail_slots -= vcpi->vcpi;
4984 if (avail_slots < 0) {
4985 DRM_DEBUG_ATOMIC("[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
4986 vcpi->port, mst_state,
4987 avail_slots + vcpi->vcpi);
4988 return -ENOSPC;
4989 }
4990
4991 if (++payload_count > mgr->max_payloads) {
4992 DRM_DEBUG_ATOMIC("[MST MGR:%p] state %p has too many payloads (max=%d)\n",
4993 mgr, mst_state, mgr->max_payloads);
4994 return -EINVAL;
4995 }
4996 }
4997 DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
4998 mgr, mst_state, avail_slots,
4999 63 - avail_slots);
5000
5001 return 0;
5002}
5003
5004/**
5005 * drm_dp_mst_add_affected_dsc_crtcs() - Add affected DSC CRTCs to the state
5006 * @state: Pointer to the new struct drm_atomic_state
5007 * @mgr: MST topology manager
5008 *
5009 * Whenever there is a change in the MST topology, the DSC configuration
5010 * may have to be recalculated, so this helper flags a modeset on the CRTC
5011 * of every DSC-capable port in the topology, forcing their state to be
5012 * recomputed.
5013 *
5014 * See also:
5015 * drm_dp_mst_atomic_enable_dsc()
5016 */
5017int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr)
5018{
5019 struct drm_dp_mst_topology_state *mst_state;
5020 struct drm_dp_vcpi_allocation *pos;
5021 struct drm_connector *connector;
5022 struct drm_connector_state *conn_state;
5023 struct drm_crtc *crtc;
5024 struct drm_crtc_state *crtc_state;
5025
5026 mst_state = drm_atomic_get_mst_topology_state(state, mgr);
5027
5028 if (IS_ERR(mst_state))
5029 return -EINVAL;
5030
5031	list_for_each_entry(pos, &mst_state->vcpis, next) {
5033 connector = pos->port->connector;
5034
5035 if (!connector)
5036 return -EINVAL;
5037
5038 conn_state = drm_atomic_get_connector_state(state, connector);
5039
5040 if (IS_ERR(conn_state))
5041 return PTR_ERR(conn_state);
5042
5043 crtc = conn_state->crtc;
5044
5045 if (WARN_ON(!crtc))
5046 return -EINVAL;
5047
5048 if (!drm_dp_mst_dsc_aux_for_port(pos->port))
5049 continue;
5050
5051 crtc_state = drm_atomic_get_crtc_state(mst_state->base.state, crtc);
5052
5053 if (IS_ERR(crtc_state))
5054 return PTR_ERR(crtc_state);
5055
5056 DRM_DEBUG_ATOMIC("[MST MGR:%p] Setting mode_changed flag on CRTC %p\n",
5057 mgr, crtc);
5058
5059 crtc_state->mode_changed = true;
5060 }
5061 return 0;
5062}
5063EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs);
5064
5065/**
5066 * drm_dp_mst_atomic_enable_dsc - Set DSC Enable Flag to On/Off
5067 * @state: Pointer to the new drm_atomic_state
5068 * @port: Pointer to the affected MST Port
5069 * @pbn: Newly recalculated bw required for link with DSC enabled
5070 * @pbn_div: Divider to calculate correct number of pbn per slot
5071 * @enable: Boolean flag to enable or disable DSC on the port
5072 *
5073 * This function enables or disables DSC on the given port by recalculating
5074 * its VCPI allocation from the provided PBN, and it sets the dsc_enabled
5075 * flag to keep track of which ports have DSC enabled.
5076 *
5077 * Returns: the number of VCPI slots now allocated, or a negative error code.
5078 */
5079int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
5080 struct drm_dp_mst_port *port,
5081 int pbn, int pbn_div,
5082 bool enable)
5083{
5084 struct drm_dp_mst_topology_state *mst_state;
5085 struct drm_dp_vcpi_allocation *pos;
5086 bool found = false;
5087 int vcpi = 0;
5088
5089 mst_state = drm_atomic_get_mst_topology_state(state, port->mgr);
5090
5091 if (IS_ERR(mst_state))
5092 return PTR_ERR(mst_state);
5093
5094 list_for_each_entry(pos, &mst_state->vcpis, next) {
5095 if (pos->port == port) {
5096 found = true;
5097 break;
5098 }
5099 }
5100
5101 if (!found) {
5102 DRM_DEBUG_ATOMIC("[MST PORT:%p] Couldn't find VCPI allocation in mst state %p\n",
5103 port, mst_state);
5104 return -EINVAL;
5105 }
5106
5107 if (pos->dsc_enabled == enable) {
5108 DRM_DEBUG_ATOMIC("[MST PORT:%p] DSC flag is already set to %d, returning %d VCPI slots\n",
5109 port, enable, pos->vcpi);
5110 vcpi = pos->vcpi;
5111 }
5112
5113 if (enable) {
5114 vcpi = drm_dp_atomic_find_vcpi_slots(state, port->mgr, port, pbn, pbn_div);
5115 DRM_DEBUG_ATOMIC("[MST PORT:%p] Enabling DSC flag, reallocating %d VCPI slots on the port\n",
5116 port, vcpi);
5117 if (vcpi < 0)
5118 return -EINVAL;
5119 }
5120
5121 pos->dsc_enabled = enable;
5122
5123 return vcpi;
5124}
5125EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
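
/*
 * A sketch of the intended call pattern from a driver's atomic_check once
 * it has decided to compress (foo_* names hypothetical). Note the PBN must
 * be recomputed for the compressed bpp, which drm_dp_calc_pbn_mode() takes
 * in units of 1/16 of a bit when dsc = true:
 *
 *	pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock,
 *				   compressed_bpp << 4, true);
 *	slots = drm_dp_mst_atomic_enable_dsc(state, foo->mst_port, pbn,
 *					     foo->pbn_div, true);
 *	if (slots < 0)
 *		return slots;
 */
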
5126/**
5127 * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
5128 * atomic update is valid
5129 * @state: Pointer to the new &struct drm_dp_mst_topology_state
5130 *
5131 * Checks the given topology state for an atomic update to ensure that it's
5132 * valid. This includes checking whether there's enough bandwidth to support
5133 * the new VCPI allocations in the atomic update.
5134 *
5135 * Any atomic drivers supporting DP MST must make sure to call this after
5136 * checking the rest of their state in their
5137 * &drm_mode_config_funcs.atomic_check() callback.
5138 *
5139 * See also:
5140 * drm_dp_atomic_find_vcpi_slots()
5141 * drm_dp_atomic_release_vcpi_slots()
5142 *
5143 * Returns:
5144 *
5145 * 0 if the new state is valid, negative error code otherwise.
5146 */
5147int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
5148{
5149 struct drm_dp_mst_topology_mgr *mgr;
5150 struct drm_dp_mst_topology_state *mst_state;
5151 int i, ret = 0;
5152
5153 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
5154 if (!mgr->mst_state)
5155 continue;
5156
5157 ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state);
5158 if (ret)
5159 break;
5160
5161 mutex_lock(&mgr->lock);
5162 ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary,
5163 mst_state);
5164 mutex_unlock(&mgr->lock);
5165 if (ret < 0)
5166 break;
5167 else
5168 ret = 0;
5169 }
5170
5171 return ret;
5172}
5173EXPORT_SYMBOL(drm_dp_mst_atomic_check);
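
/*
 * A minimal sketch of the expected integration point, in a driver's
 * &drm_mode_config_funcs.atomic_check:
 *
 *	static int foo_atomic_check(struct drm_device *dev,
 *				    struct drm_atomic_state *state)
 *	{
 *		int ret;
 *
 *		ret = drm_atomic_helper_check(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		return drm_dp_mst_atomic_check(state);
 *	}
 */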
5174
5175const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
5176 .atomic_duplicate_state = drm_dp_mst_duplicate_state,
5177 .atomic_destroy_state = drm_dp_mst_destroy_state,
5178};
5179EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
5180
5181/**
5182 * drm_atomic_get_mst_topology_state: get MST topology state
5183 *
5184 * @state: global atomic state
5185 * @mgr: MST topology manager, also the private object in this case
5186 *
5187 * This function wraps drm_atomic_get_private_obj_state(), passing in the MST
5188 * atomic state vtable so that the private object state returned is that of an
5189 * MST topology object. Also, drm_atomic_get_private_obj_state() expects the
5190 * caller to take care of locking, so it warns if connection_mutex is not held.
5191 *
5192 * RETURNS:
5193 *
5194 * The MST topology state or error pointer.
5195 */
5196struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
5197 struct drm_dp_mst_topology_mgr *mgr)
5198{
5199 return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
5200}
5201EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
5202
5203/**
5204 * drm_dp_mst_topology_mgr_init - initialise a topology manager
5205 * @mgr: manager struct to initialise
5206 * @dev: device providing this structure - for i2c addition.
5207 * @aux: DP helper aux channel to talk to this device
5208 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
5209 * @max_payloads: maximum number of payloads this GPU can source
5210 * @conn_base_id: the connector object ID the MST device is connected to.
5211 *
5212 * Return 0 for success, or negative error code on failure
5213 */
5214int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
5215 struct drm_device *dev, struct drm_dp_aux *aux,
5216 int max_dpcd_transaction_bytes,
5217 int max_payloads, int conn_base_id)
5218{
5219 struct drm_dp_mst_topology_state *mst_state;
5220
5221 mutex_init(&mgr->lock);
5222 mutex_init(&mgr->qlock);
5223 mutex_init(&mgr->payload_lock);
5224 mutex_init(&mgr->delayed_destroy_lock);
5225 mutex_init(&mgr->up_req_lock);
5226 mutex_init(&mgr->probe_lock);
5227#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
5228 mutex_init(&mgr->topology_ref_history_lock);
5229#endif
5230 INIT_LIST_HEAD(&mgr->tx_msg_downq);
5231 INIT_LIST_HEAD(&mgr->destroy_port_list);
5232 INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
5233 INIT_LIST_HEAD(&mgr->up_req_list);
5234 INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
5235 INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
5236 INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);
5237 INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work);
5238 init_waitqueue_head(&mgr->tx_waitq);
5239 mgr->dev = dev;
5240 mgr->aux = aux;
5241 mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
5242 mgr->max_payloads = max_payloads;
5243 mgr->conn_base_id = conn_base_id;
5244 if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
5245 max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
5246 return -EINVAL;
5247 mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
5248 if (!mgr->payloads)
5249 return -ENOMEM;
5250 mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
5251 if (!mgr->proposed_vcpis)
5252 return -ENOMEM;
5253 set_bit(0, &mgr->payload_mask);
5254
5255 mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
5256 if (mst_state == NULL)
5257 return -ENOMEM;
5258
5259 mst_state->mgr = mgr;
5260 INIT_LIST_HEAD(&mst_state->vcpis);
5261
5262 drm_atomic_private_obj_init(dev, &mgr->base,
5263 &mst_state->base,
5264 &drm_dp_mst_topology_state_funcs);
5265
5266 return 0;
5267}
5268EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
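
/*
 * A sketch of initialization at driver load time. The foo_* names and the
 * particular limits (16-byte DPCD transactions, 8 payloads) are
 * illustrative only, not recommendations:
 *
 *	ret = drm_dp_mst_topology_mgr_init(&foo->mst_mgr, foo->drm_dev,
 *					   &foo->dp_aux, 16, 8,
 *					   foo->connector.base.id);
 *	if (ret)
 *		return ret;
 */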
5269
5270/**
5271 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
5272 * @mgr: manager to destroy
5273 */
5274void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
5275{
5276 drm_dp_mst_topology_mgr_set_mst(mgr, false);
5277 flush_work(&mgr->work);
5278 cancel_work_sync(&mgr->delayed_destroy_work);
5279 mutex_lock(&mgr->payload_lock);
5280 kfree(mgr->payloads);
5281 mgr->payloads = NULL;
5282 kfree(mgr->proposed_vcpis);
5283 mgr->proposed_vcpis = NULL;
5284 mutex_unlock(&mgr->payload_lock);
5285 mgr->dev = NULL;
5286 mgr->aux = NULL;
5287 drm_atomic_private_obj_fini(&mgr->base);
5288 mgr->funcs = NULL;
5289
5290 mutex_destroy(&mgr->delayed_destroy_lock);
5291 mutex_destroy(&mgr->payload_lock);
5292 mutex_destroy(&mgr->qlock);
5293 mutex_destroy(&mgr->lock);
5294 mutex_destroy(&mgr->up_req_lock);
5295 mutex_destroy(&mgr->probe_lock);
5296#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
5297 mutex_destroy(&mgr->topology_ref_history_lock);
5298#endif
5299}
5300EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
5301
5302static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
5303{
5304 int i;
5305
5306 if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
5307 return false;
5308
5309 for (i = 0; i < num - 1; i++) {
5310 if (msgs[i].flags & I2C_M_RD ||
5311 msgs[i].len > 0xff)
5312 return false;
5313 }
5314
5315 return msgs[num - 1].flags & I2C_M_RD &&
5316 msgs[num - 1].len <= 0xff;
5317}
5318
5319/* I2C device */
5320static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
5321 int num)
5322{
5323 struct drm_dp_aux *aux = adapter->algo_data;
5324 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
5325 struct drm_dp_mst_branch *mstb;
5326 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
5327 unsigned int i;
5328 struct drm_dp_sideband_msg_req_body msg;
5329 struct drm_dp_sideband_msg_tx *txmsg = NULL;
5330 int ret;
5331
5332 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
5333 if (!mstb)
5334 return -EREMOTEIO;
5335
5336 if (!remote_i2c_read_ok(msgs, num)) {
5337 DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
5338 ret = -EIO;
5339 goto out;
5340 }
5341
5342 memset(&msg, 0, sizeof(msg));
5343 msg.req_type = DP_REMOTE_I2C_READ;
5344 msg.u.i2c_read.num_transactions = num - 1;
5345 msg.u.i2c_read.port_number = port->port_num;
5346 for (i = 0; i < num - 1; i++) {
5347 msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
5348 msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
5349 msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
5350 msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
5351 }
5352 msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
5353 msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
5354
5355 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
5356 if (!txmsg) {
5357 ret = -ENOMEM;
5358 goto out;
5359 }
5360
5361 txmsg->dst = mstb;
5362 drm_dp_encode_sideband_req(&msg, txmsg);
5363
5364 drm_dp_queue_down_tx(mgr, txmsg);
5365
5366 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
5367	if (ret > 0) {
5369 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
5370 ret = -EREMOTEIO;
5371 goto out;
5372 }
5373 if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
5374 ret = -EIO;
5375 goto out;
5376 }
5377 memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
5378 ret = num;
5379 }
5380out:
5381 kfree(txmsg);
5382 drm_dp_mst_topology_put_mstb(mstb);
5383 return ret;
5384}
5385
5386static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
5387{
5388 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
5389 I2C_FUNC_SMBUS_READ_BLOCK_DATA |
5390 I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
5391 I2C_FUNC_10BIT_ADDR;
5392}
5393
5394static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
5395 .functionality = drm_dp_mst_i2c_functionality,
5396 .master_xfer = drm_dp_mst_i2c_xfer,
5397};
5398
5399/**
5400 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
5401 * @aux: DisplayPort AUX channel
5402 *
5403 * Returns 0 on success or a negative error code on failure.
5404 */
5405static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
5406{
5407 aux->ddc.algo = &drm_dp_mst_i2c_algo;
5408 aux->ddc.algo_data = aux;
5409 aux->ddc.retries = 3;
5410
5411 aux->ddc.class = I2C_CLASS_DDC;
5412 aux->ddc.owner = THIS_MODULE;
5413 aux->ddc.dev.parent = aux->dev;
5414 aux->ddc.dev.of_node = aux->dev->of_node;
5415
5416 strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
5417 sizeof(aux->ddc.name));
5418
5419 return i2c_add_adapter(&aux->ddc);
5420}
5421
5422/**
5423 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
5424 * @aux: DisplayPort AUX channel
5425 */
5426static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
5427{
5428 i2c_del_adapter(&aux->ddc);
5429}
5430
5431/**
5432 * drm_dp_mst_is_virtual_dpcd() - Is the given port a virtual DP Peer Device
5433 * @port: The port to check
5434 *
5435 * A single physical MST hub object can be represented in the topology
5436 * by multiple branches, with virtual ports between those branches.
5437 *
5438 * As of DP 1.4, an MST hub with internal (virtual) ports must expose
5439 * certain DPCD registers over those ports. See sections 2.6.1.1.1
5440 * and 2.6.1.1.2 of the DisplayPort specification v1.4 for details.
5441 *
5442 * May acquire mgr->lock
5443 *
5444 * Returns:
5445 * true if the port is a virtual DP peer device, false otherwise
5446 */
5447static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
5448{
5449 struct drm_dp_mst_port *downstream_port;
5450
5451 if (!port || port->dpcd_rev < DP_DPCD_REV_14)
5452 return false;
5453
5454 /* Virtual DP Sink (Internal Display Panel) */
5455 if (port->port_num >= 8)
5456 return true;
5457
5458 /* DP-to-HDMI Protocol Converter */
5459 if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV &&
5460 !port->mcs &&
5461 port->ldps)
5462 return true;
5463
5464 /* DP-to-DP */
5465 mutex_lock(&port->mgr->lock);
5466 if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
5467 port->mstb &&
5468 port->mstb->num_ports == 2) {
5469 list_for_each_entry(downstream_port, &port->mstb->ports, next) {
5470 if (downstream_port->pdt == DP_PEER_DEVICE_SST_SINK &&
5471 !downstream_port->input) {
5472 mutex_unlock(&port->mgr->lock);
5473 return true;
5474 }
5475 }
5476 }
5477 mutex_unlock(&port->mgr->lock);
5478
5479 return false;
5480}
5481
5482/**
5483 * drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
5484 * @port: The port to check. A leaf of the MST tree with an attached display.
5485 *
5486 * Depending on the situation, DSC may be enabled via the endpoint aux,
5487 * the immediately upstream aux, or the connector's physical aux.
5488 *
5489 * This is both the correct aux to read DSC_CAPABILITY and the
5490 * correct aux to write DSC_ENABLED.
5491 *
5492 * This operation can be expensive (up to four aux reads), so
5493 * the caller should cache the returned aux.
5494 *
5495 * Returns:
5496 * NULL if DSC cannot be enabled on this port, otherwise the aux device
5497 */
5498struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
5499{
5500 struct drm_dp_mst_port *immediate_upstream_port;
5501 struct drm_dp_mst_port *fec_port;
5502 struct drm_dp_desc desc = { 0 };
5503 u8 endpoint_fec;
5504 u8 endpoint_dsc;
5505
5506 if (!port)
5507 return NULL;
5508
5509 if (port->parent->port_parent)
5510 immediate_upstream_port = port->parent->port_parent;
5511 else
5512 immediate_upstream_port = NULL;
5513
5514 fec_port = immediate_upstream_port;
5515 while (fec_port) {
5516 /*
5517 * Each physical link (i.e. not a virtual port) between the
5518 * output and the primary device must support FEC
5519 */
5520 if (!drm_dp_mst_is_virtual_dpcd(fec_port) &&
5521 !fec_port->fec_capable)
5522 return NULL;
5523
5524 fec_port = fec_port->parent->port_parent;
5525 }
5526
5527 /* DP-to-DP peer device */
5528 if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
5529 u8 upstream_dsc;
5530
5531 if (drm_dp_dpcd_read(&port->aux,
5532 DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
5533 return NULL;
5534 if (drm_dp_dpcd_read(&port->aux,
5535 DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
5536 return NULL;
5537 if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
5538 DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
5539 return NULL;
5540
5541		/* Endpoint decompression with DP-to-DP peer device */
5542 if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
5543 (endpoint_fec & DP_FEC_CAPABLE) &&
5544 (upstream_dsc & 0x2) /* DSC passthrough */)
5545 return &port->aux;
5546
5547 /* Virtual DPCD decompression with DP-to-DP peer device */
5548 return &immediate_upstream_port->aux;
5549 }
5550
5551 /* Virtual DPCD decompression with DP-to-HDMI or Virtual DP Sink */
5552 if (drm_dp_mst_is_virtual_dpcd(port))
5553 return &port->aux;
5554
5555 /*
5556 * Synaptics quirk
5557 * Applies to ports for which:
5558 * - Physical aux has Synaptics OUI
5559 * - DPv1.4 or higher
5560 * - Port is on primary branch device
5561 * - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG)
5562 */
5563 if (drm_dp_read_desc(port->mgr->aux, &desc, true))
5564 return NULL;
5565
5566 if (drm_dp_has_quirk(&desc, 0,
5567 DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
5568 port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
5569 port->parent == port->mgr->mst_primary) {
5570 u8 downstreamport;
5571
5572 if (drm_dp_dpcd_read(&port->aux, DP_DOWNSTREAMPORT_PRESENT,
5573 &downstreamport, 1) < 0)
5574 return NULL;
5575
5576 if ((downstreamport & DP_DWN_STRM_PORT_PRESENT) &&
5577 ((downstreamport & DP_DWN_STRM_PORT_TYPE_MASK)
5578 != DP_DWN_STRM_PORT_TYPE_ANALOG))
5579 return port->mgr->aux;
5580 }
5581
5582 /*
5583 * The check below verifies if the MST sink
5584 * connected to the GPU is capable of DSC -
5585 * therefore the endpoint needs to be
5586 * both DSC and FEC capable.
5587 */
5588 if (drm_dp_dpcd_read(&port->aux,
5589 DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
5590 return NULL;
5591 if (drm_dp_dpcd_read(&port->aux,
5592 DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
5593 return NULL;
5594 if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
5595 (endpoint_fec & DP_FEC_CAPABLE))
5596 return &port->aux;
5597
5598 return NULL;
5599}
5600EXPORT_SYMBOL(drm_dp_mst_dsc_aux_for_port);
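
/*
 * A sketch of how a driver might use the result to probe DSC support
 * (DP_DSC_SUPPORT and DP_DSC_RECEIVER_CAP_SIZE are real DPCD definitions,
 * the foo_* storage is hypothetical). Cache both the aux and the caps,
 * since this lookup is expensive:
 *
 *	u8 dsc_caps[DP_DSC_RECEIVER_CAP_SIZE];
 *	struct drm_dp_aux *aux;
 *
 *	aux = drm_dp_mst_dsc_aux_for_port(foo->mst_port);
 *	if (aux && drm_dp_dpcd_read(aux, DP_DSC_SUPPORT, dsc_caps,
 *				    sizeof(dsc_caps)) == sizeof(dsc_caps))
 *		foo->dsc_aux = aux;	// DSC can be enabled through this aux
 */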