Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v4.11-rc1 3122 lines 86 kB view raw
1/* 2 * Copyright © 2014 Red Hat 3 * 4 * Permission to use, copy, modify, distribute, and sell this software and its 5 * documentation for any purpose is hereby granted without fee, provided that 6 * the above copyright notice appear in all copies and that both that copyright 7 * notice and this permission notice appear in supporting documentation, and 8 * that the name of the copyright holders not be used in advertising or 9 * publicity pertaining to distribution of the software without specific, 10 * written prior permission. The copyright holders make no representations 11 * about the suitability of this software for any purpose. It is provided "as 12 * is" without express or implied warranty. 13 * 14 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, 15 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO 16 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR 17 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, 18 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 19 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE 20 * OF THIS SOFTWARE. 21 */ 22 23#include <linux/kernel.h> 24#include <linux/delay.h> 25#include <linux/init.h> 26#include <linux/errno.h> 27#include <linux/sched.h> 28#include <linux/seq_file.h> 29#include <linux/i2c.h> 30#include <drm/drm_dp_mst_helper.h> 31#include <drm/drmP.h> 32 33#include <drm/drm_fixed.h> 34 35/** 36 * DOC: dp mst helper 37 * 38 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport 39 * protocol. The helpers contain a topology manager and bandwidth manager. 40 * The helpers encapsulate the sending and received of sideband msgs. 41 */ 42static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr, 43 char *buf); 44static int test_calc_pbn_mode(void); 45 46static void drm_dp_put_port(struct drm_dp_mst_port *port); 47 48static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr, 49 int id, 50 struct drm_dp_payload *payload); 51 52static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, 53 struct drm_dp_mst_port *port, 54 int offset, int size, u8 *bytes); 55 56static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, 57 struct drm_dp_mst_branch *mstb); 58static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, 59 struct drm_dp_mst_branch *mstb, 60 struct drm_dp_mst_port *port); 61static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, 62 u8 *guid); 63 64static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux); 65static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux); 66static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr); 67/* sideband msg handling */ 68static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles) 69{ 70 u8 bitmask = 0x80; 71 u8 bitshift = 7; 72 u8 array_index = 0; 73 int number_of_bits = num_nibbles * 4; 74 u8 remainder = 0; 75 76 while (number_of_bits != 0) { 77 number_of_bits--; 78 remainder <<= 1; 79 remainder |= (data[array_index] & bitmask) >> bitshift; 80 bitmask >>= 1; 81 bitshift--; 82 if (bitmask == 0) { 83 bitmask = 0x80; 84 bitshift = 7; 85 array_index++; 86 } 87 if ((remainder & 0x10) == 0x10) 88 remainder ^= 0x13; 89 } 90 91 number_of_bits = 4; 92 while (number_of_bits != 0) { 93 number_of_bits--; 94 remainder <<= 1; 95 if ((remainder & 0x10) != 0) 96 remainder ^= 0x13; 97 } 98 99 return remainder; 100} 101 102static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes) 103{ 104 u8 bitmask = 0x80; 105 u8 bitshift = 7; 106 u8 array_index = 0; 107 int number_of_bits = number_of_bytes * 8; 108 u16 remainder = 0; 109 110 while (number_of_bits != 0) { 111 number_of_bits--; 112 remainder <<= 1; 113 remainder |= (data[array_index] & bitmask) >> bitshift; 114 bitmask >>= 1; 115 bitshift--; 116 if (bitmask == 0) { 117 bitmask = 0x80; 118 bitshift = 7; 119 array_index++; 120 } 121 if ((remainder & 0x100) == 0x100) 122 remainder ^= 0xd5; 123 } 124 125 number_of_bits = 8; 126 while (number_of_bits != 0) { 127 number_of_bits--; 128 remainder <<= 1; 129 if ((remainder & 0x100) != 0) 130 remainder ^= 0xd5; 131 } 132 133 return remainder & 0xff; 134} 135static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr) 136{ 137 u8 size = 3; 138 size += (hdr->lct / 2); 139 return size; 140} 141 142static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr, 143 u8 *buf, int *len) 144{ 145 int idx = 0; 146 int i; 147 u8 crc4; 148 buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf); 149 for (i = 0; i < (hdr->lct / 2); i++) 150 buf[idx++] = hdr->rad[i]; 151 buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) | 152 (hdr->msg_len & 0x3f); 153 buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4); 154 155 crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1); 156 buf[idx - 1] |= (crc4 & 0xf); 157 158 *len = idx; 159} 160 161static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr, 162 u8 *buf, int buflen, u8 *hdrlen) 163{ 164 u8 crc4; 165 u8 len; 166 int i; 167 u8 idx; 168 if (buf[0] == 0) 169 return false; 170 len = 3; 171 len += ((buf[0] & 0xf0) >> 4) / 2; 172 if (len > buflen) 173 return false; 174 crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1); 175 176 if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) { 177 DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]); 178 return false; 179 } 180 181 hdr->lct = (buf[0] & 0xf0) >> 4; 182 hdr->lcr = (buf[0] & 0xf); 183 idx = 1; 184 for (i = 0; i < (hdr->lct / 2); i++) 185 hdr->rad[i] = buf[idx++]; 186 hdr->broadcast = (buf[idx] >> 7) & 0x1; 187 hdr->path_msg = (buf[idx] >> 6) & 0x1; 188 hdr->msg_len = buf[idx] & 0x3f; 189 idx++; 190 hdr->somt = (buf[idx] >> 7) & 0x1; 191 hdr->eomt = (buf[idx] >> 6) & 0x1; 192 hdr->seqno = (buf[idx] >> 4) & 0x1; 193 idx++; 194 *hdrlen = idx; 195 return true; 196} 197 198static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req, 199 struct drm_dp_sideband_msg_tx *raw) 200{ 201 int idx = 0; 202 int i; 203 u8 *buf = raw->msg; 204 buf[idx++] = req->req_type & 0x7f; 205 206 switch (req->req_type) { 207 case DP_ENUM_PATH_RESOURCES: 208 buf[idx] = (req->u.port_num.port_number & 0xf) << 4; 209 idx++; 210 break; 211 case DP_ALLOCATE_PAYLOAD: 212 buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 | 213 (req->u.allocate_payload.number_sdp_streams & 0xf); 214 idx++; 215 buf[idx] = (req->u.allocate_payload.vcpi & 0x7f); 216 idx++; 217 buf[idx] = (req->u.allocate_payload.pbn >> 8); 218 idx++; 219 buf[idx] = (req->u.allocate_payload.pbn & 0xff); 220 idx++; 221 for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) { 222 buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) | 223 (req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf); 224 idx++; 225 } 226 if (req->u.allocate_payload.number_sdp_streams & 1) { 227 i = req->u.allocate_payload.number_sdp_streams - 1; 228 buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4; 229 idx++; 230 } 231 break; 232 case DP_QUERY_PAYLOAD: 233 buf[idx] = (req->u.query_payload.port_number & 0xf) << 4; 234 idx++; 235 buf[idx] = (req->u.query_payload.vcpi & 0x7f); 236 idx++; 237 break; 238 case DP_REMOTE_DPCD_READ: 239 buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4; 240 buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf; 241 idx++; 242 buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8; 243 idx++; 244 buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff); 245 idx++; 246 buf[idx] = (req->u.dpcd_read.num_bytes); 247 idx++; 248 break; 249 250 case DP_REMOTE_DPCD_WRITE: 251 buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4; 252 buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf; 253 idx++; 254 buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8; 255 idx++; 256 buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff); 257 idx++; 258 buf[idx] = (req->u.dpcd_write.num_bytes); 259 idx++; 260 memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes); 261 idx += req->u.dpcd_write.num_bytes; 262 break; 263 case DP_REMOTE_I2C_READ: 264 buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4; 265 buf[idx] |= (req->u.i2c_read.num_transactions & 0x3); 266 idx++; 267 for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) { 268 buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f; 269 idx++; 270 buf[idx] = req->u.i2c_read.transactions[i].num_bytes; 271 idx++; 272 memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes); 273 idx += req->u.i2c_read.transactions[i].num_bytes; 274 275 buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5; 276 buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf); 277 idx++; 278 } 279 buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f; 280 idx++; 281 buf[idx] = (req->u.i2c_read.num_bytes_read); 282 idx++; 283 break; 284 285 case DP_REMOTE_I2C_WRITE: 286 buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4; 287 idx++; 288 buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f; 289 idx++; 290 buf[idx] = (req->u.i2c_write.num_bytes); 291 idx++; 292 memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes); 293 idx += req->u.i2c_write.num_bytes; 294 break; 295 } 296 raw->cur_len = idx; 297} 298 299static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len) 300{ 301 u8 crc4; 302 crc4 = drm_dp_msg_data_crc4(msg, len); 303 msg[len] = crc4; 304} 305 306static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep, 307 struct drm_dp_sideband_msg_tx *raw) 308{ 309 int idx = 0; 310 u8 *buf = raw->msg; 311 312 buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f); 313 314 raw->cur_len = idx; 315} 316 317/* this adds a chunk of msg to the builder to get the final msg */ 318static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg, 319 u8 *replybuf, u8 replybuflen, bool hdr) 320{ 321 int ret; 322 u8 crc4; 323 324 if (hdr) { 325 u8 hdrlen; 326 struct drm_dp_sideband_msg_hdr recv_hdr; 327 ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen); 328 if (ret == false) { 329 print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false); 330 return false; 331 } 332 333 /* get length contained in this portion */ 334 msg->curchunk_len = recv_hdr.msg_len; 335 msg->curchunk_hdrlen = hdrlen; 336 337 /* we have already gotten an somt - don't bother parsing */ 338 if (recv_hdr.somt && msg->have_somt) 339 return false; 340 341 if (recv_hdr.somt) { 342 memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr)); 343 msg->have_somt = true; 344 } 345 if (recv_hdr.eomt) 346 msg->have_eomt = true; 347 348 /* copy the bytes for the remainder of this header chunk */ 349 msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen)); 350 memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx); 351 } else { 352 memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen); 353 msg->curchunk_idx += replybuflen; 354 } 355 356 if (msg->curchunk_idx >= msg->curchunk_len) { 357 /* do CRC */ 358 crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1); 359 /* copy chunk into bigger msg */ 360 memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1); 361 msg->curlen += msg->curchunk_len - 1; 362 } 363 return true; 364} 365 366static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw, 367 struct drm_dp_sideband_msg_reply_body *repmsg) 368{ 369 int idx = 1; 370 int i; 371 memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16); 372 idx += 16; 373 repmsg->u.link_addr.nports = raw->msg[idx] & 0xf; 374 idx++; 375 if (idx > raw->curlen) 376 goto fail_len; 377 for (i = 0; i < repmsg->u.link_addr.nports; i++) { 378 if (raw->msg[idx] & 0x80) 379 repmsg->u.link_addr.ports[i].input_port = 1; 380 381 repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7; 382 repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf); 383 384 idx++; 385 if (idx > raw->curlen) 386 goto fail_len; 387 repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1; 388 repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1; 389 if (repmsg->u.link_addr.ports[i].input_port == 0) 390 repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1; 391 idx++; 392 if (idx > raw->curlen) 393 goto fail_len; 394 if (repmsg->u.link_addr.ports[i].input_port == 0) { 395 repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]); 396 idx++; 397 if (idx > raw->curlen) 398 goto fail_len; 399 memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16); 400 idx += 16; 401 if (idx > raw->curlen) 402 goto fail_len; 403 repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf; 404 repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf); 405 idx++; 406 407 } 408 if (idx > raw->curlen) 409 goto fail_len; 410 } 411 412 return true; 413fail_len: 414 DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen); 415 return false; 416} 417 418static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw, 419 struct drm_dp_sideband_msg_reply_body *repmsg) 420{ 421 int idx = 1; 422 repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf; 423 idx++; 424 if (idx > raw->curlen) 425 goto fail_len; 426 repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx]; 427 if (idx > raw->curlen) 428 goto fail_len; 429 430 memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes); 431 return true; 432fail_len: 433 DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen); 434 return false; 435} 436 437static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw, 438 struct drm_dp_sideband_msg_reply_body *repmsg) 439{ 440 int idx = 1; 441 repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf; 442 idx++; 443 if (idx > raw->curlen) 444 goto fail_len; 445 return true; 446fail_len: 447 DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen); 448 return false; 449} 450 451static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw, 452 struct drm_dp_sideband_msg_reply_body *repmsg) 453{ 454 int idx = 1; 455 456 repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf); 457 idx++; 458 if (idx > raw->curlen) 459 goto fail_len; 460 repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx]; 461 idx++; 462 /* TODO check */ 463 memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes); 464 return true; 465fail_len: 466 DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen); 467 return false; 468} 469 470static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw, 471 struct drm_dp_sideband_msg_reply_body *repmsg) 472{ 473 int idx = 1; 474 repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf; 475 idx++; 476 if (idx > raw->curlen) 477 goto fail_len; 478 repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]); 479 idx += 2; 480 if (idx > raw->curlen) 481 goto fail_len; 482 repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]); 483 idx += 2; 484 if (idx > raw->curlen) 485 goto fail_len; 486 return true; 487fail_len: 488 DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen); 489 return false; 490} 491 492static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw, 493 struct drm_dp_sideband_msg_reply_body *repmsg) 494{ 495 int idx = 1; 496 repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf; 497 idx++; 498 if (idx > raw->curlen) 499 goto fail_len; 500 repmsg->u.allocate_payload.vcpi = raw->msg[idx]; 501 idx++; 502 if (idx > raw->curlen) 503 goto fail_len; 504 repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]); 505 idx += 2; 506 if (idx > raw->curlen) 507 goto fail_len; 508 return true; 509fail_len: 510 DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen); 511 return false; 512} 513 514static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw, 515 struct drm_dp_sideband_msg_reply_body *repmsg) 516{ 517 int idx = 1; 518 repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf; 519 idx++; 520 if (idx > raw->curlen) 521 goto fail_len; 522 repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]); 523 idx += 2; 524 if (idx > raw->curlen) 525 goto fail_len; 526 return true; 527fail_len: 528 DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen); 529 return false; 530} 531 532static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw, 533 struct drm_dp_sideband_msg_reply_body *msg) 534{ 535 memset(msg, 0, sizeof(*msg)); 536 msg->reply_type = (raw->msg[0] & 0x80) >> 7; 537 msg->req_type = (raw->msg[0] & 0x7f); 538 539 if (msg->reply_type) { 540 memcpy(msg->u.nak.guid, &raw->msg[1], 16); 541 msg->u.nak.reason = raw->msg[17]; 542 msg->u.nak.nak_data = raw->msg[18]; 543 return false; 544 } 545 546 switch (msg->req_type) { 547 case DP_LINK_ADDRESS: 548 return drm_dp_sideband_parse_link_address(raw, msg); 549 case DP_QUERY_PAYLOAD: 550 return drm_dp_sideband_parse_query_payload_ack(raw, msg); 551 case DP_REMOTE_DPCD_READ: 552 return drm_dp_sideband_parse_remote_dpcd_read(raw, msg); 553 case DP_REMOTE_DPCD_WRITE: 554 return drm_dp_sideband_parse_remote_dpcd_write(raw, msg); 555 case DP_REMOTE_I2C_READ: 556 return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg); 557 case DP_ENUM_PATH_RESOURCES: 558 return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg); 559 case DP_ALLOCATE_PAYLOAD: 560 return drm_dp_sideband_parse_allocate_payload_ack(raw, msg); 561 default: 562 DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type); 563 return false; 564 } 565} 566 567static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw, 568 struct drm_dp_sideband_msg_req_body *msg) 569{ 570 int idx = 1; 571 572 msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4; 573 idx++; 574 if (idx > raw->curlen) 575 goto fail_len; 576 577 memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16); 578 idx += 16; 579 if (idx > raw->curlen) 580 goto fail_len; 581 582 msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1; 583 msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1; 584 msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1; 585 msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1; 586 msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7); 587 idx++; 588 return true; 589fail_len: 590 DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen); 591 return false; 592} 593 594static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw, 595 struct drm_dp_sideband_msg_req_body *msg) 596{ 597 int idx = 1; 598 599 msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4; 600 idx++; 601 if (idx > raw->curlen) 602 goto fail_len; 603 604 memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16); 605 idx += 16; 606 if (idx > raw->curlen) 607 goto fail_len; 608 609 msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]); 610 idx++; 611 return true; 612fail_len: 613 DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen); 614 return false; 615} 616 617static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw, 618 struct drm_dp_sideband_msg_req_body *msg) 619{ 620 memset(msg, 0, sizeof(*msg)); 621 msg->req_type = (raw->msg[0] & 0x7f); 622 623 switch (msg->req_type) { 624 case DP_CONNECTION_STATUS_NOTIFY: 625 return drm_dp_sideband_parse_connection_status_notify(raw, msg); 626 case DP_RESOURCE_STATUS_NOTIFY: 627 return drm_dp_sideband_parse_resource_status_notify(raw, msg); 628 default: 629 DRM_ERROR("Got unknown request 0x%02x\n", msg->req_type); 630 return false; 631 } 632} 633 634static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes) 635{ 636 struct drm_dp_sideband_msg_req_body req; 637 638 req.req_type = DP_REMOTE_DPCD_WRITE; 639 req.u.dpcd_write.port_number = port_num; 640 req.u.dpcd_write.dpcd_address = offset; 641 req.u.dpcd_write.num_bytes = num_bytes; 642 req.u.dpcd_write.bytes = bytes; 643 drm_dp_encode_sideband_req(&req, msg); 644 645 return 0; 646} 647 648static int build_link_address(struct drm_dp_sideband_msg_tx *msg) 649{ 650 struct drm_dp_sideband_msg_req_body req; 651 652 req.req_type = DP_LINK_ADDRESS; 653 drm_dp_encode_sideband_req(&req, msg); 654 return 0; 655} 656 657static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num) 658{ 659 struct drm_dp_sideband_msg_req_body req; 660 661 req.req_type = DP_ENUM_PATH_RESOURCES; 662 req.u.port_num.port_number = port_num; 663 drm_dp_encode_sideband_req(&req, msg); 664 msg->path_msg = true; 665 return 0; 666} 667 668static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num, 669 u8 vcpi, uint16_t pbn, 670 u8 number_sdp_streams, 671 u8 *sdp_stream_sink) 672{ 673 struct drm_dp_sideband_msg_req_body req; 674 memset(&req, 0, sizeof(req)); 675 req.req_type = DP_ALLOCATE_PAYLOAD; 676 req.u.allocate_payload.port_number = port_num; 677 req.u.allocate_payload.vcpi = vcpi; 678 req.u.allocate_payload.pbn = pbn; 679 req.u.allocate_payload.number_sdp_streams = number_sdp_streams; 680 memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink, 681 number_sdp_streams); 682 drm_dp_encode_sideband_req(&req, msg); 683 msg->path_msg = true; 684 return 0; 685} 686 687static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr, 688 struct drm_dp_vcpi *vcpi) 689{ 690 int ret, vcpi_ret; 691 692 mutex_lock(&mgr->payload_lock); 693 ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1); 694 if (ret > mgr->max_payloads) { 695 ret = -EINVAL; 696 DRM_DEBUG_KMS("out of payload ids %d\n", ret); 697 goto out_unlock; 698 } 699 700 vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1); 701 if (vcpi_ret > mgr->max_payloads) { 702 ret = -EINVAL; 703 DRM_DEBUG_KMS("out of vcpi ids %d\n", ret); 704 goto out_unlock; 705 } 706 707 set_bit(ret, &mgr->payload_mask); 708 set_bit(vcpi_ret, &mgr->vcpi_mask); 709 vcpi->vcpi = vcpi_ret + 1; 710 mgr->proposed_vcpis[ret - 1] = vcpi; 711out_unlock: 712 mutex_unlock(&mgr->payload_lock); 713 return ret; 714} 715 716static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr, 717 int vcpi) 718{ 719 int i; 720 if (vcpi == 0) 721 return; 722 723 mutex_lock(&mgr->payload_lock); 724 DRM_DEBUG_KMS("putting payload %d\n", vcpi); 725 clear_bit(vcpi - 1, &mgr->vcpi_mask); 726 727 for (i = 0; i < mgr->max_payloads; i++) { 728 if (mgr->proposed_vcpis[i]) 729 if (mgr->proposed_vcpis[i]->vcpi == vcpi) { 730 mgr->proposed_vcpis[i] = NULL; 731 clear_bit(i + 1, &mgr->payload_mask); 732 } 733 } 734 mutex_unlock(&mgr->payload_lock); 735} 736 737static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr, 738 struct drm_dp_sideband_msg_tx *txmsg) 739{ 740 bool ret; 741 742 /* 743 * All updates to txmsg->state are protected by mgr->qlock, and the two 744 * cases we check here are terminal states. For those the barriers 745 * provided by the wake_up/wait_event pair are enough. 746 */ 747 ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX || 748 txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT); 749 return ret; 750} 751 752static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb, 753 struct drm_dp_sideband_msg_tx *txmsg) 754{ 755 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; 756 int ret; 757 758 ret = wait_event_timeout(mgr->tx_waitq, 759 check_txmsg_state(mgr, txmsg), 760 (4 * HZ)); 761 mutex_lock(&mstb->mgr->qlock); 762 if (ret > 0) { 763 if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) { 764 ret = -EIO; 765 goto out; 766 } 767 } else { 768 DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno); 769 770 /* dump some state */ 771 ret = -EIO; 772 773 /* remove from q */ 774 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED || 775 txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) { 776 list_del(&txmsg->next); 777 } 778 779 if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND || 780 txmsg->state == DRM_DP_SIDEBAND_TX_SENT) { 781 mstb->tx_slots[txmsg->seqno] = NULL; 782 } 783 } 784out: 785 mutex_unlock(&mgr->qlock); 786 787 return ret; 788} 789 790static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad) 791{ 792 struct drm_dp_mst_branch *mstb; 793 794 mstb = kzalloc(sizeof(*mstb), GFP_KERNEL); 795 if (!mstb) 796 return NULL; 797 798 mstb->lct = lct; 799 if (lct > 1) 800 memcpy(mstb->rad, rad, lct / 2); 801 INIT_LIST_HEAD(&mstb->ports); 802 kref_init(&mstb->kref); 803 return mstb; 804} 805 806static void drm_dp_free_mst_port(struct kref *kref); 807 808static void drm_dp_free_mst_branch_device(struct kref *kref) 809{ 810 struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref); 811 if (mstb->port_parent) { 812 if (list_empty(&mstb->port_parent->next)) 813 kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port); 814 } 815 kfree(mstb); 816} 817 818static void drm_dp_destroy_mst_branch_device(struct kref *kref) 819{ 820 struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref); 821 struct drm_dp_mst_port *port, *tmp; 822 bool wake_tx = false; 823 824 /* 825 * init kref again to be used by ports to remove mst branch when it is 826 * not needed anymore 827 */ 828 kref_init(kref); 829 830 if (mstb->port_parent && list_empty(&mstb->port_parent->next)) 831 kref_get(&mstb->port_parent->kref); 832 833 /* 834 * destroy all ports - don't need lock 835 * as there are no more references to the mst branch 836 * device at this point. 837 */ 838 list_for_each_entry_safe(port, tmp, &mstb->ports, next) { 839 list_del(&port->next); 840 drm_dp_put_port(port); 841 } 842 843 /* drop any tx slots msg */ 844 mutex_lock(&mstb->mgr->qlock); 845 if (mstb->tx_slots[0]) { 846 mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT; 847 mstb->tx_slots[0] = NULL; 848 wake_tx = true; 849 } 850 if (mstb->tx_slots[1]) { 851 mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT; 852 mstb->tx_slots[1] = NULL; 853 wake_tx = true; 854 } 855 mutex_unlock(&mstb->mgr->qlock); 856 857 if (wake_tx) 858 wake_up(&mstb->mgr->tx_waitq); 859 860 kref_put(kref, drm_dp_free_mst_branch_device); 861} 862 863static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb) 864{ 865 kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device); 866} 867 868 869static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt) 870{ 871 struct drm_dp_mst_branch *mstb; 872 873 switch (old_pdt) { 874 case DP_PEER_DEVICE_DP_LEGACY_CONV: 875 case DP_PEER_DEVICE_SST_SINK: 876 /* remove i2c over sideband */ 877 drm_dp_mst_unregister_i2c_bus(&port->aux); 878 break; 879 case DP_PEER_DEVICE_MST_BRANCHING: 880 mstb = port->mstb; 881 port->mstb = NULL; 882 drm_dp_put_mst_branch_device(mstb); 883 break; 884 } 885} 886 887static void drm_dp_destroy_port(struct kref *kref) 888{ 889 struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref); 890 struct drm_dp_mst_topology_mgr *mgr = port->mgr; 891 892 if (!port->input) { 893 port->vcpi.num_slots = 0; 894 895 kfree(port->cached_edid); 896 897 /* 898 * The only time we don't have a connector 899 * on an output port is if the connector init 900 * fails. 901 */ 902 if (port->connector) { 903 /* we can't destroy the connector here, as 904 * we might be holding the mode_config.mutex 905 * from an EDID retrieval */ 906 907 mutex_lock(&mgr->destroy_connector_lock); 908 kref_get(&port->parent->kref); 909 list_add(&port->next, &mgr->destroy_connector_list); 910 mutex_unlock(&mgr->destroy_connector_lock); 911 schedule_work(&mgr->destroy_connector_work); 912 return; 913 } 914 /* no need to clean up vcpi 915 * as if we have no connector we never setup a vcpi */ 916 drm_dp_port_teardown_pdt(port, port->pdt); 917 port->pdt = DP_PEER_DEVICE_NONE; 918 } 919 kfree(port); 920} 921 922static void drm_dp_put_port(struct drm_dp_mst_port *port) 923{ 924 kref_put(&port->kref, drm_dp_destroy_port); 925} 926 927static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find) 928{ 929 struct drm_dp_mst_port *port; 930 struct drm_dp_mst_branch *rmstb; 931 if (to_find == mstb) { 932 kref_get(&mstb->kref); 933 return mstb; 934 } 935 list_for_each_entry(port, &mstb->ports, next) { 936 if (port->mstb) { 937 rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find); 938 if (rmstb) 939 return rmstb; 940 } 941 } 942 return NULL; 943} 944 945static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb) 946{ 947 struct drm_dp_mst_branch *rmstb = NULL; 948 mutex_lock(&mgr->lock); 949 if (mgr->mst_primary) 950 rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb); 951 mutex_unlock(&mgr->lock); 952 return rmstb; 953} 954 955static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find) 956{ 957 struct drm_dp_mst_port *port, *mport; 958 959 list_for_each_entry(port, &mstb->ports, next) { 960 if (port == to_find) { 961 kref_get(&port->kref); 962 return port; 963 } 964 if (port->mstb) { 965 mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find); 966 if (mport) 967 return mport; 968 } 969 } 970 return NULL; 971} 972 973static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) 974{ 975 struct drm_dp_mst_port *rport = NULL; 976 mutex_lock(&mgr->lock); 977 if (mgr->mst_primary) 978 rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port); 979 mutex_unlock(&mgr->lock); 980 return rport; 981} 982 983static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num) 984{ 985 struct drm_dp_mst_port *port; 986 987 list_for_each_entry(port, &mstb->ports, next) { 988 if (port->port_num == port_num) { 989 kref_get(&port->kref); 990 return port; 991 } 992 } 993 994 return NULL; 995} 996 997/* 998 * calculate a new RAD for this MST branch device 999 * if parent has an LCT of 2 then it has 1 nibble of RAD, 1000 * if parent has an LCT of 3 then it has 2 nibbles of RAD, 1001 */ 1002static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port, 1003 u8 *rad) 1004{ 1005 int parent_lct = port->parent->lct; 1006 int shift = 4; 1007 int idx = (parent_lct - 1) / 2; 1008 if (parent_lct > 1) { 1009 memcpy(rad, port->parent->rad, idx + 1); 1010 shift = (parent_lct % 2) ? 4 : 0; 1011 } else 1012 rad[0] = 0; 1013 1014 rad[idx] |= port->port_num << shift; 1015 return parent_lct + 1; 1016} 1017 1018/* 1019 * return sends link address for new mstb 1020 */ 1021static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port) 1022{ 1023 int ret; 1024 u8 rad[6], lct; 1025 bool send_link = false; 1026 switch (port->pdt) { 1027 case DP_PEER_DEVICE_DP_LEGACY_CONV: 1028 case DP_PEER_DEVICE_SST_SINK: 1029 /* add i2c over sideband */ 1030 ret = drm_dp_mst_register_i2c_bus(&port->aux); 1031 break; 1032 case DP_PEER_DEVICE_MST_BRANCHING: 1033 lct = drm_dp_calculate_rad(port, rad); 1034 1035 port->mstb = drm_dp_add_mst_branch_device(lct, rad); 1036 port->mstb->mgr = port->mgr; 1037 port->mstb->port_parent = port; 1038 1039 send_link = true; 1040 break; 1041 } 1042 return send_link; 1043} 1044 1045static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid) 1046{ 1047 int ret; 1048 1049 memcpy(mstb->guid, guid, 16); 1050 1051 if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) { 1052 if (mstb->port_parent) { 1053 ret = drm_dp_send_dpcd_write( 1054 mstb->mgr, 1055 mstb->port_parent, 1056 DP_GUID, 1057 16, 1058 mstb->guid); 1059 } else { 1060 1061 ret = drm_dp_dpcd_write( 1062 mstb->mgr->aux, 1063 DP_GUID, 1064 mstb->guid, 1065 16); 1066 } 1067 } 1068} 1069 1070static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb, 1071 int pnum, 1072 char *proppath, 1073 size_t proppath_size) 1074{ 1075 int i; 1076 char temp[8]; 1077 snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id); 1078 for (i = 0; i < (mstb->lct - 1); i++) { 1079 int shift = (i % 2) ? 0 : 4; 1080 int port_num = (mstb->rad[i / 2] >> shift) & 0xf; 1081 snprintf(temp, sizeof(temp), "-%d", port_num); 1082 strlcat(proppath, temp, proppath_size); 1083 } 1084 snprintf(temp, sizeof(temp), "-%d", pnum); 1085 strlcat(proppath, temp, proppath_size); 1086} 1087 1088static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, 1089 struct drm_device *dev, 1090 struct drm_dp_link_addr_reply_port *port_msg) 1091{ 1092 struct drm_dp_mst_port *port; 1093 bool ret; 1094 bool created = false; 1095 int old_pdt = 0; 1096 int old_ddps = 0; 1097 port = drm_dp_get_port(mstb, port_msg->port_number); 1098 if (!port) { 1099 port = kzalloc(sizeof(*port), GFP_KERNEL); 1100 if (!port) 1101 return; 1102 kref_init(&port->kref); 1103 port->parent = mstb; 1104 port->port_num = port_msg->port_number; 1105 port->mgr = mstb->mgr; 1106 port->aux.name = "DPMST"; 1107 port->aux.dev = dev->dev; 1108 created = true; 1109 } else { 1110 old_pdt = port->pdt; 1111 old_ddps = port->ddps; 1112 } 1113 1114 port->pdt = port_msg->peer_device_type; 1115 port->input = port_msg->input_port; 1116 port->mcs = port_msg->mcs; 1117 port->ddps = port_msg->ddps; 1118 port->ldps = port_msg->legacy_device_plug_status; 1119 port->dpcd_rev = port_msg->dpcd_revision; 1120 port->num_sdp_streams = port_msg->num_sdp_streams; 1121 port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks; 1122 1123 /* manage mstb port lists with mgr lock - take a reference 1124 for this list */ 1125 if (created) { 1126 mutex_lock(&mstb->mgr->lock); 1127 kref_get(&port->kref); 1128 list_add(&port->next, &mstb->ports); 1129 mutex_unlock(&mstb->mgr->lock); 1130 } 1131 1132 if (old_ddps != port->ddps) { 1133 if (port->ddps) { 1134 if (!port->input) 1135 drm_dp_send_enum_path_resources(mstb->mgr, mstb, port); 1136 } else { 1137 port->available_pbn = 0; 1138 } 1139 } 1140 1141 if (old_pdt != port->pdt && !port->input) { 1142 drm_dp_port_teardown_pdt(port, old_pdt); 1143 1144 ret = drm_dp_port_setup_pdt(port); 1145 if (ret == true) 1146 drm_dp_send_link_address(mstb->mgr, port->mstb); 1147 } 1148 1149 if (created && !port->input) { 1150 char proppath[255]; 1151 1152 build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath)); 1153 port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath); 1154 if (!port->connector) { 1155 /* remove it from the port list */ 1156 mutex_lock(&mstb->mgr->lock); 1157 list_del(&port->next); 1158 mutex_unlock(&mstb->mgr->lock); 1159 /* drop port list reference */ 1160 drm_dp_put_port(port); 1161 goto out; 1162 } 1163 if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV || 1164 port->pdt == DP_PEER_DEVICE_SST_SINK) && 1165 port->port_num >= DP_MST_LOGICAL_PORT_0) { 1166 port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc); 1167 drm_mode_connector_set_tile_property(port->connector); 1168 } 1169 (*mstb->mgr->cbs->register_connector)(port->connector); 1170 } 1171 1172out: 1173 /* put reference to this port */ 1174 drm_dp_put_port(port); 1175} 1176 1177static void drm_dp_update_port(struct drm_dp_mst_branch *mstb, 1178 struct drm_dp_connection_status_notify *conn_stat) 1179{ 1180 struct drm_dp_mst_port *port; 1181 int old_pdt; 1182 int old_ddps; 1183 bool dowork = false; 1184 port = drm_dp_get_port(mstb, conn_stat->port_number); 1185 if (!port) 1186 return; 1187 1188 old_ddps = port->ddps; 1189 old_pdt = port->pdt; 1190 port->pdt = conn_stat->peer_device_type; 1191 port->mcs = conn_stat->message_capability_status; 1192 port->ldps = conn_stat->legacy_device_plug_status; 1193 port->ddps = conn_stat->displayport_device_plug_status; 1194 1195 if (old_ddps != port->ddps) { 1196 if (port->ddps) { 1197 dowork = true; 1198 } else { 1199 port->available_pbn = 0; 1200 } 1201 } 1202 if (old_pdt != port->pdt && !port->input) { 1203 drm_dp_port_teardown_pdt(port, old_pdt); 1204 1205 if (drm_dp_port_setup_pdt(port)) 1206 dowork = true; 1207 } 1208 1209 drm_dp_put_port(port); 1210 if (dowork) 1211 queue_work(system_long_wq, &mstb->mgr->work); 1212 1213} 1214 1215static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr, 1216 u8 lct, u8 *rad) 1217{ 1218 struct drm_dp_mst_branch *mstb; 1219 struct drm_dp_mst_port *port; 1220 int i; 1221 /* find the port by iterating down */ 1222 1223 mutex_lock(&mgr->lock); 1224 mstb = mgr->mst_primary; 1225 1226 for (i = 0; i < lct - 1; i++) { 1227 int shift = (i % 2) ? 0 : 4; 1228 int port_num = (rad[i / 2] >> shift) & 0xf; 1229 1230 list_for_each_entry(port, &mstb->ports, next) { 1231 if (port->port_num == port_num) { 1232 mstb = port->mstb; 1233 if (!mstb) { 1234 DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]); 1235 goto out; 1236 } 1237 1238 break; 1239 } 1240 } 1241 } 1242 kref_get(&mstb->kref); 1243out: 1244 mutex_unlock(&mgr->lock); 1245 return mstb; 1246} 1247 1248static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper( 1249 struct drm_dp_mst_branch *mstb, 1250 uint8_t *guid) 1251{ 1252 struct drm_dp_mst_branch *found_mstb; 1253 struct drm_dp_mst_port *port; 1254 1255 if (memcmp(mstb->guid, guid, 16) == 0) 1256 return mstb; 1257 1258 1259 list_for_each_entry(port, &mstb->ports, next) { 1260 if (!port->mstb) 1261 continue; 1262 1263 found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid); 1264 1265 if (found_mstb) 1266 return found_mstb; 1267 } 1268 1269 return NULL; 1270} 1271 1272static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid( 1273 struct drm_dp_mst_topology_mgr *mgr, 1274 uint8_t *guid) 1275{ 1276 struct drm_dp_mst_branch *mstb; 1277 1278 /* find the port by iterating down */ 1279 mutex_lock(&mgr->lock); 1280 1281 mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid); 1282 1283 if (mstb) 1284 kref_get(&mstb->kref); 1285 1286 mutex_unlock(&mgr->lock); 1287 return mstb; 1288} 1289 1290static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr, 1291 struct drm_dp_mst_branch *mstb) 1292{ 1293 struct drm_dp_mst_port *port; 1294 struct drm_dp_mst_branch *mstb_child; 1295 if (!mstb->link_address_sent) 1296 drm_dp_send_link_address(mgr, mstb); 1297 1298 list_for_each_entry(port, &mstb->ports, next) { 1299 if (port->input) 1300 continue; 1301 1302 if (!port->ddps) 1303 continue; 1304 1305 if (!port->available_pbn) 1306 drm_dp_send_enum_path_resources(mgr, mstb, port); 1307 1308 if (port->mstb) { 1309 mstb_child = drm_dp_get_validated_mstb_ref(mgr, port->mstb); 1310 if (mstb_child) { 1311 drm_dp_check_and_send_link_address(mgr, mstb_child); 1312 drm_dp_put_mst_branch_device(mstb_child); 1313 } 1314 } 1315 } 1316} 1317 1318static void drm_dp_mst_link_probe_work(struct work_struct *work) 1319{ 1320 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work); 1321 struct drm_dp_mst_branch *mstb; 1322 1323 mutex_lock(&mgr->lock); 1324 mstb = mgr->mst_primary; 1325 if (mstb) { 1326 kref_get(&mstb->kref); 1327 } 1328 mutex_unlock(&mgr->lock); 1329 if (mstb) { 1330 drm_dp_check_and_send_link_address(mgr, mstb); 1331 drm_dp_put_mst_branch_device(mstb); 1332 } 1333} 1334 1335static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, 1336 u8 *guid) 1337{ 1338 static u8 zero_guid[16]; 1339 1340 if (!memcmp(guid, zero_guid, 16)) { 1341 u64 salt = get_jiffies_64(); 1342 memcpy(&guid[0], &salt, sizeof(u64)); 1343 memcpy(&guid[8], &salt, sizeof(u64)); 1344 return false; 1345 } 1346 return true; 1347} 1348 1349#if 0 1350static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes) 1351{ 1352 struct drm_dp_sideband_msg_req_body req; 1353 1354 req.req_type = DP_REMOTE_DPCD_READ; 1355 req.u.dpcd_read.port_number = port_num; 1356 req.u.dpcd_read.dpcd_address = offset; 1357 req.u.dpcd_read.num_bytes = num_bytes; 1358 drm_dp_encode_sideband_req(&req, msg); 1359 1360 return 0; 1361} 1362#endif 1363 1364static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr, 1365 bool up, u8 *msg, int len) 1366{ 1367 int ret; 1368 int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE; 1369 int tosend, total, offset; 1370 int retries = 0; 1371 1372retry: 1373 total = len; 1374 offset = 0; 1375 do { 1376 tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total); 1377 1378 ret = drm_dp_dpcd_write(mgr->aux, regbase + offset, 1379 &msg[offset], 1380 tosend); 1381 if (ret != tosend) { 1382 if (ret == -EIO && retries < 5) { 1383 retries++; 1384 goto retry; 1385 } 1386 DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret); 1387 1388 return -EIO; 1389 } 1390 offset += tosend; 1391 total -= tosend; 1392 } while (total > 0); 1393 return 0; 1394} 1395 1396static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr, 1397 struct drm_dp_sideband_msg_tx *txmsg) 1398{ 1399 struct drm_dp_mst_branch *mstb = txmsg->dst; 1400 u8 req_type; 1401 1402 /* both msg slots are full */ 1403 if (txmsg->seqno == -1) { 1404 if (mstb->tx_slots[0] && mstb->tx_slots[1]) { 1405 DRM_DEBUG_KMS("%s: failed to find slot\n", __func__); 1406 return -EAGAIN; 1407 } 1408 if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) { 1409 txmsg->seqno = mstb->last_seqno; 1410 mstb->last_seqno ^= 1; 1411 } else if (mstb->tx_slots[0] == NULL) 1412 txmsg->seqno = 0; 1413 else 1414 txmsg->seqno = 1; 1415 mstb->tx_slots[txmsg->seqno] = txmsg; 1416 } 1417 1418 req_type = txmsg->msg[0] & 0x7f; 1419 if (req_type == DP_CONNECTION_STATUS_NOTIFY || 1420 req_type == DP_RESOURCE_STATUS_NOTIFY) 1421 hdr->broadcast = 1; 1422 else 1423 hdr->broadcast = 0; 1424 hdr->path_msg = txmsg->path_msg; 1425 hdr->lct = mstb->lct; 1426 hdr->lcr = mstb->lct - 1; 1427 if (mstb->lct > 1) 1428 memcpy(hdr->rad, mstb->rad, mstb->lct / 2); 1429 hdr->seqno = txmsg->seqno; 1430 return 0; 1431} 1432/* 1433 * process a single block of the next message in the sideband queue 1434 */ 1435static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr, 1436 struct drm_dp_sideband_msg_tx *txmsg, 1437 bool up) 1438{ 1439 u8 chunk[48]; 1440 struct drm_dp_sideband_msg_hdr hdr; 1441 int len, space, idx, tosend; 1442 int ret; 1443 1444 memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr)); 1445 1446 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) { 1447 txmsg->seqno = -1; 1448 txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND; 1449 } 1450 1451 /* make hdr from dst mst - for replies use seqno 1452 otherwise assign one */ 1453 ret = set_hdr_from_dst_qlock(&hdr, txmsg); 1454 if (ret < 0) 1455 return ret; 1456 1457 /* amount left to send in this message */ 1458 len = txmsg->cur_len - txmsg->cur_offset; 1459 1460 /* 48 - sideband msg size - 1 byte for data CRC, x header bytes */ 1461 space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr); 1462 1463 tosend = min(len, space); 1464 if (len == txmsg->cur_len) 1465 hdr.somt = 1; 1466 if (space >= len) 1467 hdr.eomt = 1; 1468 1469 1470 hdr.msg_len = tosend + 1; 1471 drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx); 1472 memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend); 1473 /* add crc at end */ 1474 drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend); 1475 idx += tosend + 1; 1476 1477 ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx); 1478 if (ret) { 1479 DRM_DEBUG_KMS("sideband msg failed to send\n"); 1480 return ret; 1481 } 1482 1483 txmsg->cur_offset += tosend; 1484 if (txmsg->cur_offset == txmsg->cur_len) { 1485 txmsg->state = DRM_DP_SIDEBAND_TX_SENT; 1486 return 1; 1487 } 1488 return 0; 1489} 1490 1491static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) 1492{ 1493 struct drm_dp_sideband_msg_tx *txmsg; 1494 int ret; 1495 1496 WARN_ON(!mutex_is_locked(&mgr->qlock)); 1497 1498 /* construct a chunk from the first msg in the tx_msg queue */ 1499 if (list_empty(&mgr->tx_msg_downq)) 1500 return; 1501 1502 txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next); 1503 ret = process_single_tx_qlock(mgr, txmsg, false); 1504 if (ret == 1) { 1505 /* txmsg is sent it should be in the slots now */ 1506 list_del(&txmsg->next); 1507 } else if (ret) { 1508 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret); 1509 list_del(&txmsg->next); 1510 if (txmsg->seqno != -1) 1511 txmsg->dst->tx_slots[txmsg->seqno] = NULL; 1512 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT; 1513 wake_up(&mgr->tx_waitq); 1514 } 1515} 1516 1517/* called holding qlock */ 1518static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr, 1519 struct drm_dp_sideband_msg_tx *txmsg) 1520{ 1521 int ret; 1522 1523 /* construct a chunk from the first msg in the tx_msg queue */ 1524 ret = process_single_tx_qlock(mgr, txmsg, true); 1525 1526 if (ret != 1) 1527 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret); 1528 1529 txmsg->dst->tx_slots[txmsg->seqno] = NULL; 1530} 1531 1532static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, 1533 struct drm_dp_sideband_msg_tx *txmsg) 1534{ 1535 mutex_lock(&mgr->qlock); 1536 list_add_tail(&txmsg->next, &mgr->tx_msg_downq); 1537 if (list_is_singular(&mgr->tx_msg_downq)) 1538 process_single_down_tx_qlock(mgr); 1539 mutex_unlock(&mgr->qlock); 1540} 1541 1542static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, 1543 struct drm_dp_mst_branch *mstb) 1544{ 1545 int len; 1546 struct drm_dp_sideband_msg_tx *txmsg; 1547 int ret; 1548 1549 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 1550 if (!txmsg) 1551 return; 1552 1553 txmsg->dst = mstb; 1554 len = build_link_address(txmsg); 1555 1556 mstb->link_address_sent = true; 1557 drm_dp_queue_down_tx(mgr, txmsg); 1558 1559 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 1560 if (ret > 0) { 1561 int i; 1562 1563 if (txmsg->reply.reply_type == 1) 1564 DRM_DEBUG_KMS("link address nak received\n"); 1565 else { 1566 DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports); 1567 for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) { 1568 DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i, 1569 txmsg->reply.u.link_addr.ports[i].input_port, 1570 txmsg->reply.u.link_addr.ports[i].peer_device_type, 1571 txmsg->reply.u.link_addr.ports[i].port_number, 1572 txmsg->reply.u.link_addr.ports[i].dpcd_revision, 1573 txmsg->reply.u.link_addr.ports[i].mcs, 1574 txmsg->reply.u.link_addr.ports[i].ddps, 1575 txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status, 1576 txmsg->reply.u.link_addr.ports[i].num_sdp_streams, 1577 txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks); 1578 } 1579 1580 drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid); 1581 1582 for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) { 1583 drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]); 1584 } 1585 (*mgr->cbs->hotplug)(mgr); 1586 } 1587 } else { 1588 mstb->link_address_sent = false; 1589 DRM_DEBUG_KMS("link address failed %d\n", ret); 1590 } 1591 1592 kfree(txmsg); 1593} 1594 1595static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, 1596 struct drm_dp_mst_branch *mstb, 1597 struct drm_dp_mst_port *port) 1598{ 1599 int len; 1600 struct drm_dp_sideband_msg_tx *txmsg; 1601 int ret; 1602 1603 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 1604 if (!txmsg) 1605 return -ENOMEM; 1606 1607 txmsg->dst = mstb; 1608 len = build_enum_path_resources(txmsg, port->port_num); 1609 1610 drm_dp_queue_down_tx(mgr, txmsg); 1611 1612 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 1613 if (ret > 0) { 1614 if (txmsg->reply.reply_type == 1) 1615 DRM_DEBUG_KMS("enum path resources nak received\n"); 1616 else { 1617 if (port->port_num != txmsg->reply.u.path_resources.port_number) 1618 DRM_ERROR("got incorrect port in response\n"); 1619 DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number, 1620 txmsg->reply.u.path_resources.avail_payload_bw_number); 1621 port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number; 1622 } 1623 } 1624 1625 kfree(txmsg); 1626 return 0; 1627} 1628 1629static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb) 1630{ 1631 if (!mstb->port_parent) 1632 return NULL; 1633 1634 if (mstb->port_parent->mstb != mstb) 1635 return mstb->port_parent; 1636 1637 return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent); 1638} 1639 1640static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr, 1641 struct drm_dp_mst_branch *mstb, 1642 int *port_num) 1643{ 1644 struct drm_dp_mst_branch *rmstb = NULL; 1645 struct drm_dp_mst_port *found_port; 1646 mutex_lock(&mgr->lock); 1647 if (mgr->mst_primary) { 1648 found_port = drm_dp_get_last_connected_port_to_mstb(mstb); 1649 1650 if (found_port) { 1651 rmstb = found_port->parent; 1652 kref_get(&rmstb->kref); 1653 *port_num = found_port->port_num; 1654 } 1655 } 1656 mutex_unlock(&mgr->lock); 1657 return rmstb; 1658} 1659 1660static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, 1661 struct drm_dp_mst_port *port, 1662 int id, 1663 int pbn) 1664{ 1665 struct drm_dp_sideband_msg_tx *txmsg; 1666 struct drm_dp_mst_branch *mstb; 1667 int len, ret, port_num; 1668 u8 sinks[DRM_DP_MAX_SDP_STREAMS]; 1669 int i; 1670 1671 port = drm_dp_get_validated_port_ref(mgr, port); 1672 if (!port) 1673 return -EINVAL; 1674 1675 port_num = port->port_num; 1676 mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); 1677 if (!mstb) { 1678 mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num); 1679 1680 if (!mstb) { 1681 drm_dp_put_port(port); 1682 return -EINVAL; 1683 } 1684 } 1685 1686 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 1687 if (!txmsg) { 1688 ret = -ENOMEM; 1689 goto fail_put; 1690 } 1691 1692 for (i = 0; i < port->num_sdp_streams; i++) 1693 sinks[i] = i; 1694 1695 txmsg->dst = mstb; 1696 len = build_allocate_payload(txmsg, port_num, 1697 id, 1698 pbn, port->num_sdp_streams, sinks); 1699 1700 drm_dp_queue_down_tx(mgr, txmsg); 1701 1702 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 1703 if (ret > 0) { 1704 if (txmsg->reply.reply_type == 1) { 1705 ret = -EINVAL; 1706 } else 1707 ret = 0; 1708 } 1709 kfree(txmsg); 1710fail_put: 1711 drm_dp_put_mst_branch_device(mstb); 1712 drm_dp_put_port(port); 1713 return ret; 1714} 1715 1716static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr, 1717 int id, 1718 struct drm_dp_payload *payload) 1719{ 1720 int ret; 1721 1722 ret = drm_dp_dpcd_write_payload(mgr, id, payload); 1723 if (ret < 0) { 1724 payload->payload_state = 0; 1725 return ret; 1726 } 1727 payload->payload_state = DP_PAYLOAD_LOCAL; 1728 return 0; 1729} 1730 1731static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr, 1732 struct drm_dp_mst_port *port, 1733 int id, 1734 struct drm_dp_payload *payload) 1735{ 1736 int ret; 1737 ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn); 1738 if (ret < 0) 1739 return ret; 1740 payload->payload_state = DP_PAYLOAD_REMOTE; 1741 return ret; 1742} 1743 1744static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr, 1745 struct drm_dp_mst_port *port, 1746 int id, 1747 struct drm_dp_payload *payload) 1748{ 1749 DRM_DEBUG_KMS("\n"); 1750 /* its okay for these to fail */ 1751 if (port) { 1752 drm_dp_payload_send_msg(mgr, port, id, 0); 1753 } 1754 1755 drm_dp_dpcd_write_payload(mgr, id, payload); 1756 payload->payload_state = DP_PAYLOAD_DELETE_LOCAL; 1757 return 0; 1758} 1759 1760static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr, 1761 int id, 1762 struct drm_dp_payload *payload) 1763{ 1764 payload->payload_state = 0; 1765 return 0; 1766} 1767 1768/** 1769 * drm_dp_update_payload_part1() - Execute payload update part 1 1770 * @mgr: manager to use. 1771 * 1772 * This iterates over all proposed virtual channels, and tries to 1773 * allocate space in the link for them. For 0->slots transitions, 1774 * this step just writes the VCPI to the MST device. For slots->0 1775 * transitions, this writes the updated VCPIs and removes the 1776 * remote VC payloads. 1777 * 1778 * after calling this the driver should generate ACT and payload 1779 * packets. 1780 */ 1781int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) 1782{ 1783 int i, j; 1784 int cur_slots = 1; 1785 struct drm_dp_payload req_payload; 1786 struct drm_dp_mst_port *port; 1787 1788 mutex_lock(&mgr->payload_lock); 1789 for (i = 0; i < mgr->max_payloads; i++) { 1790 /* solve the current payloads - compare to the hw ones 1791 - update the hw view */ 1792 req_payload.start_slot = cur_slots; 1793 if (mgr->proposed_vcpis[i]) { 1794 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); 1795 port = drm_dp_get_validated_port_ref(mgr, port); 1796 if (!port) { 1797 mutex_unlock(&mgr->payload_lock); 1798 return -EINVAL; 1799 } 1800 req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots; 1801 req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi; 1802 } else { 1803 port = NULL; 1804 req_payload.num_slots = 0; 1805 } 1806 1807 if (mgr->payloads[i].start_slot != req_payload.start_slot) { 1808 mgr->payloads[i].start_slot = req_payload.start_slot; 1809 } 1810 /* work out what is required to happen with this payload */ 1811 if (mgr->payloads[i].num_slots != req_payload.num_slots) { 1812 1813 /* need to push an update for this payload */ 1814 if (req_payload.num_slots) { 1815 drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload); 1816 mgr->payloads[i].num_slots = req_payload.num_slots; 1817 mgr->payloads[i].vcpi = req_payload.vcpi; 1818 } else if (mgr->payloads[i].num_slots) { 1819 mgr->payloads[i].num_slots = 0; 1820 drm_dp_destroy_payload_step1(mgr, port, mgr->payloads[i].vcpi, &mgr->payloads[i]); 1821 req_payload.payload_state = mgr->payloads[i].payload_state; 1822 mgr->payloads[i].start_slot = 0; 1823 } 1824 mgr->payloads[i].payload_state = req_payload.payload_state; 1825 } 1826 cur_slots += req_payload.num_slots; 1827 1828 if (port) 1829 drm_dp_put_port(port); 1830 } 1831 1832 for (i = 0; i < mgr->max_payloads; i++) { 1833 if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) { 1834 DRM_DEBUG_KMS("removing payload %d\n", i); 1835 for (j = i; j < mgr->max_payloads - 1; j++) { 1836 memcpy(&mgr->payloads[j], &mgr->payloads[j + 1], sizeof(struct drm_dp_payload)); 1837 mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1]; 1838 if (mgr->proposed_vcpis[j] && mgr->proposed_vcpis[j]->num_slots) { 1839 set_bit(j + 1, &mgr->payload_mask); 1840 } else { 1841 clear_bit(j + 1, &mgr->payload_mask); 1842 } 1843 } 1844 memset(&mgr->payloads[mgr->max_payloads - 1], 0, sizeof(struct drm_dp_payload)); 1845 mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL; 1846 clear_bit(mgr->max_payloads, &mgr->payload_mask); 1847 1848 } 1849 } 1850 mutex_unlock(&mgr->payload_lock); 1851 1852 return 0; 1853} 1854EXPORT_SYMBOL(drm_dp_update_payload_part1); 1855 1856/** 1857 * drm_dp_update_payload_part2() - Execute payload update part 2 1858 * @mgr: manager to use. 1859 * 1860 * This iterates over all proposed virtual channels, and tries to 1861 * allocate space in the link for them. For 0->slots transitions, 1862 * this step writes the remote VC payload commands. For slots->0 1863 * this just resets some internal state. 1864 */ 1865int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr) 1866{ 1867 struct drm_dp_mst_port *port; 1868 int i; 1869 int ret = 0; 1870 mutex_lock(&mgr->payload_lock); 1871 for (i = 0; i < mgr->max_payloads; i++) { 1872 1873 if (!mgr->proposed_vcpis[i]) 1874 continue; 1875 1876 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); 1877 1878 DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state); 1879 if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) { 1880 ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]); 1881 } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) { 1882 ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]); 1883 } 1884 if (ret) { 1885 mutex_unlock(&mgr->payload_lock); 1886 return ret; 1887 } 1888 } 1889 mutex_unlock(&mgr->payload_lock); 1890 return 0; 1891} 1892EXPORT_SYMBOL(drm_dp_update_payload_part2); 1893 1894#if 0 /* unused as of yet */ 1895static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr, 1896 struct drm_dp_mst_port *port, 1897 int offset, int size) 1898{ 1899 int len; 1900 struct drm_dp_sideband_msg_tx *txmsg; 1901 1902 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 1903 if (!txmsg) 1904 return -ENOMEM; 1905 1906 len = build_dpcd_read(txmsg, port->port_num, 0, 8); 1907 txmsg->dst = port->parent; 1908 1909 drm_dp_queue_down_tx(mgr, txmsg); 1910 1911 return 0; 1912} 1913#endif 1914 1915static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, 1916 struct drm_dp_mst_port *port, 1917 int offset, int size, u8 *bytes) 1918{ 1919 int len; 1920 int ret; 1921 struct drm_dp_sideband_msg_tx *txmsg; 1922 struct drm_dp_mst_branch *mstb; 1923 1924 mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); 1925 if (!mstb) 1926 return -EINVAL; 1927 1928 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 1929 if (!txmsg) { 1930 ret = -ENOMEM; 1931 goto fail_put; 1932 } 1933 1934 len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes); 1935 txmsg->dst = mstb; 1936 1937 drm_dp_queue_down_tx(mgr, txmsg); 1938 1939 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 1940 if (ret > 0) { 1941 if (txmsg->reply.reply_type == 1) { 1942 ret = -EINVAL; 1943 } else 1944 ret = 0; 1945 } 1946 kfree(txmsg); 1947fail_put: 1948 drm_dp_put_mst_branch_device(mstb); 1949 return ret; 1950} 1951 1952static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type) 1953{ 1954 struct drm_dp_sideband_msg_reply_body reply; 1955 1956 reply.reply_type = 0; 1957 reply.req_type = req_type; 1958 drm_dp_encode_sideband_reply(&reply, msg); 1959 return 0; 1960} 1961 1962static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr, 1963 struct drm_dp_mst_branch *mstb, 1964 int req_type, int seqno, bool broadcast) 1965{ 1966 struct drm_dp_sideband_msg_tx *txmsg; 1967 1968 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 1969 if (!txmsg) 1970 return -ENOMEM; 1971 1972 txmsg->dst = mstb; 1973 txmsg->seqno = seqno; 1974 drm_dp_encode_up_ack_reply(txmsg, req_type); 1975 1976 mutex_lock(&mgr->qlock); 1977 1978 process_single_up_tx_qlock(mgr, txmsg); 1979 1980 mutex_unlock(&mgr->qlock); 1981 1982 kfree(txmsg); 1983 return 0; 1984} 1985 1986static bool drm_dp_get_vc_payload_bw(int dp_link_bw, 1987 int dp_link_count, 1988 int *out) 1989{ 1990 switch (dp_link_bw) { 1991 default: 1992 DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n", 1993 dp_link_bw, dp_link_count); 1994 return false; 1995 1996 case DP_LINK_BW_1_62: 1997 *out = 3 * dp_link_count; 1998 break; 1999 case DP_LINK_BW_2_7: 2000 *out = 5 * dp_link_count; 2001 break; 2002 case DP_LINK_BW_5_4: 2003 *out = 10 * dp_link_count; 2004 break; 2005 } 2006 return true; 2007} 2008 2009/** 2010 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager 2011 * @mgr: manager to set state for 2012 * @mst_state: true to enable MST on this connector - false to disable. 2013 * 2014 * This is called by the driver when it detects an MST capable device plugged 2015 * into a DP MST capable port, or when a DP MST capable device is unplugged. 2016 */ 2017int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state) 2018{ 2019 int ret = 0; 2020 struct drm_dp_mst_branch *mstb = NULL; 2021 2022 mutex_lock(&mgr->lock); 2023 if (mst_state == mgr->mst_state) 2024 goto out_unlock; 2025 2026 mgr->mst_state = mst_state; 2027 /* set the device into MST mode */ 2028 if (mst_state) { 2029 WARN_ON(mgr->mst_primary); 2030 2031 /* get dpcd info */ 2032 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE); 2033 if (ret != DP_RECEIVER_CAP_SIZE) { 2034 DRM_DEBUG_KMS("failed to read DPCD\n"); 2035 goto out_unlock; 2036 } 2037 2038 if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1], 2039 mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK, 2040 &mgr->pbn_div)) { 2041 ret = -EINVAL; 2042 goto out_unlock; 2043 } 2044 2045 mgr->total_pbn = 2560; 2046 mgr->total_slots = DIV_ROUND_UP(mgr->total_pbn, mgr->pbn_div); 2047 mgr->avail_slots = mgr->total_slots; 2048 2049 /* add initial branch device at LCT 1 */ 2050 mstb = drm_dp_add_mst_branch_device(1, NULL); 2051 if (mstb == NULL) { 2052 ret = -ENOMEM; 2053 goto out_unlock; 2054 } 2055 mstb->mgr = mgr; 2056 2057 /* give this the main reference */ 2058 mgr->mst_primary = mstb; 2059 kref_get(&mgr->mst_primary->kref); 2060 2061 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 2062 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC); 2063 if (ret < 0) { 2064 goto out_unlock; 2065 } 2066 2067 { 2068 struct drm_dp_payload reset_pay; 2069 reset_pay.start_slot = 0; 2070 reset_pay.num_slots = 0x3f; 2071 drm_dp_dpcd_write_payload(mgr, 0, &reset_pay); 2072 } 2073 2074 queue_work(system_long_wq, &mgr->work); 2075 2076 ret = 0; 2077 } else { 2078 /* disable MST on the device */ 2079 mstb = mgr->mst_primary; 2080 mgr->mst_primary = NULL; 2081 /* this can fail if the device is gone */ 2082 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0); 2083 ret = 0; 2084 memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload)); 2085 mgr->payload_mask = 0; 2086 set_bit(0, &mgr->payload_mask); 2087 mgr->vcpi_mask = 0; 2088 } 2089 2090out_unlock: 2091 mutex_unlock(&mgr->lock); 2092 if (mstb) 2093 drm_dp_put_mst_branch_device(mstb); 2094 return ret; 2095 2096} 2097EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst); 2098 2099/** 2100 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager 2101 * @mgr: manager to suspend 2102 * 2103 * This function tells the MST device that we can't handle UP messages 2104 * anymore. This should stop it from sending any since we are suspended. 2105 */ 2106void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr) 2107{ 2108 mutex_lock(&mgr->lock); 2109 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 2110 DP_MST_EN | DP_UPSTREAM_IS_SRC); 2111 mutex_unlock(&mgr->lock); 2112 flush_work(&mgr->work); 2113 flush_work(&mgr->destroy_connector_work); 2114} 2115EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend); 2116 2117/** 2118 * drm_dp_mst_topology_mgr_resume() - resume the MST manager 2119 * @mgr: manager to resume 2120 * 2121 * This will fetch DPCD and see if the device is still there, 2122 * if it is, it will rewrite the MSTM control bits, and return. 2123 * 2124 * if the device fails this returns -1, and the driver should do 2125 * a full MST reprobe, in case we were undocked. 2126 */ 2127int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr) 2128{ 2129 int ret = 0; 2130 2131 mutex_lock(&mgr->lock); 2132 2133 if (mgr->mst_primary) { 2134 int sret; 2135 u8 guid[16]; 2136 2137 sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE); 2138 if (sret != DP_RECEIVER_CAP_SIZE) { 2139 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); 2140 ret = -1; 2141 goto out_unlock; 2142 } 2143 2144 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 2145 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC); 2146 if (ret < 0) { 2147 DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n"); 2148 ret = -1; 2149 goto out_unlock; 2150 } 2151 2152 /* Some hubs forget their guids after they resume */ 2153 sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16); 2154 if (sret != 16) { 2155 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); 2156 ret = -1; 2157 goto out_unlock; 2158 } 2159 drm_dp_check_mstb_guid(mgr->mst_primary, guid); 2160 2161 ret = 0; 2162 } else 2163 ret = -1; 2164 2165out_unlock: 2166 mutex_unlock(&mgr->lock); 2167 return ret; 2168} 2169EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume); 2170 2171static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up) 2172{ 2173 int len; 2174 u8 replyblock[32]; 2175 int replylen, origlen, curreply; 2176 int ret; 2177 struct drm_dp_sideband_msg_rx *msg; 2178 int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE; 2179 msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv; 2180 2181 len = min(mgr->max_dpcd_transaction_bytes, 16); 2182 ret = drm_dp_dpcd_read(mgr->aux, basereg, 2183 replyblock, len); 2184 if (ret != len) { 2185 DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret); 2186 return; 2187 } 2188 ret = drm_dp_sideband_msg_build(msg, replyblock, len, true); 2189 if (!ret) { 2190 DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]); 2191 return; 2192 } 2193 replylen = msg->curchunk_len + msg->curchunk_hdrlen; 2194 2195 origlen = replylen; 2196 replylen -= len; 2197 curreply = len; 2198 while (replylen > 0) { 2199 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16); 2200 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply, 2201 replyblock, len); 2202 if (ret != len) { 2203 DRM_DEBUG_KMS("failed to read a chunk\n"); 2204 } 2205 ret = drm_dp_sideband_msg_build(msg, replyblock, len, false); 2206 if (ret == false) 2207 DRM_DEBUG_KMS("failed to build sideband msg\n"); 2208 curreply += len; 2209 replylen -= len; 2210 } 2211} 2212 2213static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) 2214{ 2215 int ret = 0; 2216 2217 drm_dp_get_one_sb_msg(mgr, false); 2218 2219 if (mgr->down_rep_recv.have_eomt) { 2220 struct drm_dp_sideband_msg_tx *txmsg; 2221 struct drm_dp_mst_branch *mstb; 2222 int slot = -1; 2223 mstb = drm_dp_get_mst_branch_device(mgr, 2224 mgr->down_rep_recv.initial_hdr.lct, 2225 mgr->down_rep_recv.initial_hdr.rad); 2226 2227 if (!mstb) { 2228 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct); 2229 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); 2230 return 0; 2231 } 2232 2233 /* find the message */ 2234 slot = mgr->down_rep_recv.initial_hdr.seqno; 2235 mutex_lock(&mgr->qlock); 2236 txmsg = mstb->tx_slots[slot]; 2237 /* remove from slots */ 2238 mutex_unlock(&mgr->qlock); 2239 2240 if (!txmsg) { 2241 DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n", 2242 mstb, 2243 mgr->down_rep_recv.initial_hdr.seqno, 2244 mgr->down_rep_recv.initial_hdr.lct, 2245 mgr->down_rep_recv.initial_hdr.rad[0], 2246 mgr->down_rep_recv.msg[0]); 2247 drm_dp_put_mst_branch_device(mstb); 2248 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); 2249 return 0; 2250 } 2251 2252 drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply); 2253 if (txmsg->reply.reply_type == 1) { 2254 DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason, txmsg->reply.u.nak.nak_data); 2255 } 2256 2257 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); 2258 drm_dp_put_mst_branch_device(mstb); 2259 2260 mutex_lock(&mgr->qlock); 2261 txmsg->state = DRM_DP_SIDEBAND_TX_RX; 2262 mstb->tx_slots[slot] = NULL; 2263 mutex_unlock(&mgr->qlock); 2264 2265 wake_up(&mgr->tx_waitq); 2266 } 2267 return ret; 2268} 2269 2270static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) 2271{ 2272 int ret = 0; 2273 drm_dp_get_one_sb_msg(mgr, true); 2274 2275 if (mgr->up_req_recv.have_eomt) { 2276 struct drm_dp_sideband_msg_req_body msg; 2277 struct drm_dp_mst_branch *mstb = NULL; 2278 bool seqno; 2279 2280 if (!mgr->up_req_recv.initial_hdr.broadcast) { 2281 mstb = drm_dp_get_mst_branch_device(mgr, 2282 mgr->up_req_recv.initial_hdr.lct, 2283 mgr->up_req_recv.initial_hdr.rad); 2284 if (!mstb) { 2285 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct); 2286 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); 2287 return 0; 2288 } 2289 } 2290 2291 seqno = mgr->up_req_recv.initial_hdr.seqno; 2292 drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg); 2293 2294 if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { 2295 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false); 2296 2297 if (!mstb) 2298 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid); 2299 2300 if (!mstb) { 2301 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct); 2302 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); 2303 return 0; 2304 } 2305 2306 drm_dp_update_port(mstb, &msg.u.conn_stat); 2307 2308 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type); 2309 (*mgr->cbs->hotplug)(mgr); 2310 2311 } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { 2312 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false); 2313 if (!mstb) 2314 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid); 2315 2316 if (!mstb) { 2317 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct); 2318 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); 2319 return 0; 2320 } 2321 2322 DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn); 2323 } 2324 2325 drm_dp_put_mst_branch_device(mstb); 2326 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); 2327 } 2328 return ret; 2329} 2330 2331/** 2332 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify 2333 * @mgr: manager to notify irq for. 2334 * @esi: 4 bytes from SINK_COUNT_ESI 2335 * @handled: whether the hpd interrupt was consumed or not 2336 * 2337 * This should be called from the driver when it detects a short IRQ, 2338 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The 2339 * topology manager will process the sideband messages received as a result 2340 * of this. 2341 */ 2342int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled) 2343{ 2344 int ret = 0; 2345 int sc; 2346 *handled = false; 2347 sc = esi[0] & 0x3f; 2348 2349 if (sc != mgr->sink_count) { 2350 mgr->sink_count = sc; 2351 *handled = true; 2352 } 2353 2354 if (esi[1] & DP_DOWN_REP_MSG_RDY) { 2355 ret = drm_dp_mst_handle_down_rep(mgr); 2356 *handled = true; 2357 } 2358 2359 if (esi[1] & DP_UP_REQ_MSG_RDY) { 2360 ret |= drm_dp_mst_handle_up_req(mgr); 2361 *handled = true; 2362 } 2363 2364 drm_dp_mst_kick_tx(mgr); 2365 return ret; 2366} 2367EXPORT_SYMBOL(drm_dp_mst_hpd_irq); 2368 2369/** 2370 * drm_dp_mst_detect_port() - get connection status for an MST port 2371 * @connector: DRM connector for this port 2372 * @mgr: manager for this port 2373 * @port: unverified pointer to a port 2374 * 2375 * This returns the current connection state for a port. It validates the 2376 * port pointer still exists so the caller doesn't require a reference 2377 */ 2378enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector, 2379 struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) 2380{ 2381 enum drm_connector_status status = connector_status_disconnected; 2382 2383 /* we need to search for the port in the mgr in case its gone */ 2384 port = drm_dp_get_validated_port_ref(mgr, port); 2385 if (!port) 2386 return connector_status_disconnected; 2387 2388 if (!port->ddps) 2389 goto out; 2390 2391 switch (port->pdt) { 2392 case DP_PEER_DEVICE_NONE: 2393 case DP_PEER_DEVICE_MST_BRANCHING: 2394 break; 2395 2396 case DP_PEER_DEVICE_SST_SINK: 2397 status = connector_status_connected; 2398 /* for logical ports - cache the EDID */ 2399 if (port->port_num >= 8 && !port->cached_edid) { 2400 port->cached_edid = drm_get_edid(connector, &port->aux.ddc); 2401 } 2402 break; 2403 case DP_PEER_DEVICE_DP_LEGACY_CONV: 2404 if (port->ldps) 2405 status = connector_status_connected; 2406 break; 2407 } 2408out: 2409 drm_dp_put_port(port); 2410 return status; 2411} 2412EXPORT_SYMBOL(drm_dp_mst_detect_port); 2413 2414/** 2415 * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not 2416 * @mgr: manager for this port 2417 * @port: unverified pointer to a port. 2418 * 2419 * This returns whether the port supports audio or not. 2420 */ 2421bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr, 2422 struct drm_dp_mst_port *port) 2423{ 2424 bool ret = false; 2425 2426 port = drm_dp_get_validated_port_ref(mgr, port); 2427 if (!port) 2428 return ret; 2429 ret = port->has_audio; 2430 drm_dp_put_port(port); 2431 return ret; 2432} 2433EXPORT_SYMBOL(drm_dp_mst_port_has_audio); 2434 2435/** 2436 * drm_dp_mst_get_edid() - get EDID for an MST port 2437 * @connector: toplevel connector to get EDID for 2438 * @mgr: manager for this port 2439 * @port: unverified pointer to a port. 2440 * 2441 * This returns an EDID for the port connected to a connector, 2442 * It validates the pointer still exists so the caller doesn't require a 2443 * reference. 2444 */ 2445struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) 2446{ 2447 struct edid *edid = NULL; 2448 2449 /* we need to search for the port in the mgr in case its gone */ 2450 port = drm_dp_get_validated_port_ref(mgr, port); 2451 if (!port) 2452 return NULL; 2453 2454 if (port->cached_edid) 2455 edid = drm_edid_duplicate(port->cached_edid); 2456 else { 2457 edid = drm_get_edid(connector, &port->aux.ddc); 2458 drm_mode_connector_set_tile_property(connector); 2459 } 2460 port->has_audio = drm_detect_monitor_audio(edid); 2461 drm_dp_put_port(port); 2462 return edid; 2463} 2464EXPORT_SYMBOL(drm_dp_mst_get_edid); 2465 2466/** 2467 * drm_dp_find_vcpi_slots() - find slots for this PBN value 2468 * @mgr: manager to use 2469 * @pbn: payload bandwidth to convert into slots. 2470 */ 2471int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, 2472 int pbn) 2473{ 2474 int num_slots; 2475 2476 num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div); 2477 2478 if (num_slots > mgr->avail_slots) 2479 return -ENOSPC; 2480 return num_slots; 2481} 2482EXPORT_SYMBOL(drm_dp_find_vcpi_slots); 2483 2484static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr, 2485 struct drm_dp_vcpi *vcpi, int pbn) 2486{ 2487 int num_slots; 2488 int ret; 2489 2490 num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div); 2491 2492 if (num_slots > mgr->avail_slots) 2493 return -ENOSPC; 2494 2495 vcpi->pbn = pbn; 2496 vcpi->aligned_pbn = num_slots * mgr->pbn_div; 2497 vcpi->num_slots = num_slots; 2498 2499 ret = drm_dp_mst_assign_payload_id(mgr, vcpi); 2500 if (ret < 0) 2501 return ret; 2502 return 0; 2503} 2504 2505/** 2506 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel 2507 * @mgr: manager for this port 2508 * @port: port to allocate a virtual channel for. 2509 * @pbn: payload bandwidth number to request 2510 * @slots: returned number of slots for this PBN. 2511 */ 2512bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots) 2513{ 2514 int ret; 2515 2516 port = drm_dp_get_validated_port_ref(mgr, port); 2517 if (!port) 2518 return false; 2519 2520 if (port->vcpi.vcpi > 0) { 2521 DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn); 2522 if (pbn == port->vcpi.pbn) { 2523 *slots = port->vcpi.num_slots; 2524 drm_dp_put_port(port); 2525 return true; 2526 } 2527 } 2528 2529 ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn); 2530 if (ret) { 2531 DRM_DEBUG_KMS("failed to init vcpi %d %d %d\n", DIV_ROUND_UP(pbn, mgr->pbn_div), mgr->avail_slots, ret); 2532 goto out; 2533 } 2534 DRM_DEBUG_KMS("initing vcpi for %d %d\n", pbn, port->vcpi.num_slots); 2535 *slots = port->vcpi.num_slots; 2536 2537 drm_dp_put_port(port); 2538 return true; 2539out: 2540 return false; 2541} 2542EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi); 2543 2544int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) 2545{ 2546 int slots = 0; 2547 port = drm_dp_get_validated_port_ref(mgr, port); 2548 if (!port) 2549 return slots; 2550 2551 slots = port->vcpi.num_slots; 2552 drm_dp_put_port(port); 2553 return slots; 2554} 2555EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots); 2556 2557/** 2558 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI 2559 * @mgr: manager for this port 2560 * @port: unverified pointer to a port. 2561 * 2562 * This just resets the number of slots for the ports VCPI for later programming. 2563 */ 2564void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) 2565{ 2566 port = drm_dp_get_validated_port_ref(mgr, port); 2567 if (!port) 2568 return; 2569 port->vcpi.num_slots = 0; 2570 drm_dp_put_port(port); 2571} 2572EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots); 2573 2574/** 2575 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI 2576 * @mgr: manager for this port 2577 * @port: unverified port to deallocate vcpi for 2578 */ 2579void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) 2580{ 2581 port = drm_dp_get_validated_port_ref(mgr, port); 2582 if (!port) 2583 return; 2584 2585 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); 2586 port->vcpi.num_slots = 0; 2587 port->vcpi.pbn = 0; 2588 port->vcpi.aligned_pbn = 0; 2589 port->vcpi.vcpi = 0; 2590 drm_dp_put_port(port); 2591} 2592EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi); 2593 2594static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr, 2595 int id, struct drm_dp_payload *payload) 2596{ 2597 u8 payload_alloc[3], status; 2598 int ret; 2599 int retries = 0; 2600 2601 drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, 2602 DP_PAYLOAD_TABLE_UPDATED); 2603 2604 payload_alloc[0] = id; 2605 payload_alloc[1] = payload->start_slot; 2606 payload_alloc[2] = payload->num_slots; 2607 2608 ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3); 2609 if (ret != 3) { 2610 DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret); 2611 goto fail; 2612 } 2613 2614retry: 2615 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); 2616 if (ret < 0) { 2617 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret); 2618 goto fail; 2619 } 2620 2621 if (!(status & DP_PAYLOAD_TABLE_UPDATED)) { 2622 retries++; 2623 if (retries < 20) { 2624 usleep_range(10000, 20000); 2625 goto retry; 2626 } 2627 DRM_DEBUG_KMS("status not set after read payload table status %d\n", status); 2628 ret = -EINVAL; 2629 goto fail; 2630 } 2631 ret = 0; 2632fail: 2633 return ret; 2634} 2635 2636 2637/** 2638 * drm_dp_check_act_status() - Check ACT handled status. 2639 * @mgr: manager to use 2640 * 2641 * Check the payload status bits in the DPCD for ACT handled completion. 2642 */ 2643int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr) 2644{ 2645 u8 status; 2646 int ret; 2647 int count = 0; 2648 2649 do { 2650 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); 2651 2652 if (ret < 0) { 2653 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret); 2654 goto fail; 2655 } 2656 2657 if (status & DP_PAYLOAD_ACT_HANDLED) 2658 break; 2659 count++; 2660 udelay(100); 2661 2662 } while (count < 30); 2663 2664 if (!(status & DP_PAYLOAD_ACT_HANDLED)) { 2665 DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count); 2666 ret = -EINVAL; 2667 goto fail; 2668 } 2669 return 0; 2670fail: 2671 return ret; 2672} 2673EXPORT_SYMBOL(drm_dp_check_act_status); 2674 2675/** 2676 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode. 2677 * @clock: dot clock for the mode 2678 * @bpp: bpp for the mode. 2679 * 2680 * This uses the formula in the spec to calculate the PBN value for a mode. 2681 */ 2682int drm_dp_calc_pbn_mode(int clock, int bpp) 2683{ 2684 u64 kbps; 2685 s64 peak_kbps; 2686 u32 numerator; 2687 u32 denominator; 2688 2689 kbps = clock * bpp; 2690 2691 /* 2692 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006 2693 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on 2694 * common multiplier to render an integer PBN for all link rate/lane 2695 * counts combinations 2696 * calculate 2697 * peak_kbps *= (1006/1000) 2698 * peak_kbps *= (64/54) 2699 * peak_kbps *= 8 convert to bytes 2700 */ 2701 2702 numerator = 64 * 1006; 2703 denominator = 54 * 8 * 1000 * 1000; 2704 2705 kbps *= numerator; 2706 peak_kbps = drm_fixp_from_fraction(kbps, denominator); 2707 2708 return drm_fixp2int_ceil(peak_kbps); 2709} 2710EXPORT_SYMBOL(drm_dp_calc_pbn_mode); 2711 2712static int test_calc_pbn_mode(void) 2713{ 2714 int ret; 2715 ret = drm_dp_calc_pbn_mode(154000, 30); 2716 if (ret != 689) { 2717 DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n", 2718 154000, 30, 689, ret); 2719 return -EINVAL; 2720 } 2721 ret = drm_dp_calc_pbn_mode(234000, 30); 2722 if (ret != 1047) { 2723 DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n", 2724 234000, 30, 1047, ret); 2725 return -EINVAL; 2726 } 2727 ret = drm_dp_calc_pbn_mode(297000, 24); 2728 if (ret != 1063) { 2729 DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n", 2730 297000, 24, 1063, ret); 2731 return -EINVAL; 2732 } 2733 return 0; 2734} 2735 2736/* we want to kick the TX after we've ack the up/down IRQs. */ 2737static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr) 2738{ 2739 queue_work(system_long_wq, &mgr->tx_work); 2740} 2741 2742static void drm_dp_mst_dump_mstb(struct seq_file *m, 2743 struct drm_dp_mst_branch *mstb) 2744{ 2745 struct drm_dp_mst_port *port; 2746 int tabs = mstb->lct; 2747 char prefix[10]; 2748 int i; 2749 2750 for (i = 0; i < tabs; i++) 2751 prefix[i] = '\t'; 2752 prefix[i] = '\0'; 2753 2754 seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports); 2755 list_for_each_entry(port, &mstb->ports, next) { 2756 seq_printf(m, "%sport: %d: input: %d: pdt: %d, ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n", prefix, port->port_num, port->input, port->pdt, port->ddps, port->ldps, port->num_sdp_streams, port->num_sdp_stream_sinks, port, port->connector); 2757 if (port->mstb) 2758 drm_dp_mst_dump_mstb(m, port->mstb); 2759 } 2760} 2761 2762static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr, 2763 char *buf) 2764{ 2765 int ret; 2766 int i; 2767 for (i = 0; i < 4; i++) { 2768 ret = drm_dp_dpcd_read(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS + (i * 16), &buf[i * 16], 16); 2769 if (ret != 16) 2770 break; 2771 } 2772 if (i == 4) 2773 return true; 2774 return false; 2775} 2776 2777static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr, 2778 struct drm_dp_mst_port *port, char *name, 2779 int namelen) 2780{ 2781 struct edid *mst_edid; 2782 2783 mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port); 2784 drm_edid_get_monitor_name(mst_edid, name, namelen); 2785} 2786 2787/** 2788 * drm_dp_mst_dump_topology(): dump topology to seq file. 2789 * @m: seq_file to dump output to 2790 * @mgr: manager to dump current topology for. 2791 * 2792 * helper to dump MST topology to a seq file for debugfs. 2793 */ 2794void drm_dp_mst_dump_topology(struct seq_file *m, 2795 struct drm_dp_mst_topology_mgr *mgr) 2796{ 2797 int i; 2798 struct drm_dp_mst_port *port; 2799 2800 mutex_lock(&mgr->lock); 2801 if (mgr->mst_primary) 2802 drm_dp_mst_dump_mstb(m, mgr->mst_primary); 2803 2804 /* dump VCPIs */ 2805 mutex_unlock(&mgr->lock); 2806 2807 mutex_lock(&mgr->payload_lock); 2808 seq_printf(m, "vcpi: %lx %lx %d\n", mgr->payload_mask, mgr->vcpi_mask, 2809 mgr->max_payloads); 2810 2811 for (i = 0; i < mgr->max_payloads; i++) { 2812 if (mgr->proposed_vcpis[i]) { 2813 char name[14]; 2814 2815 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); 2816 fetch_monitor_name(mgr, port, name, sizeof(name)); 2817 seq_printf(m, "vcpi %d: %d %d %d sink name: %s\n", i, 2818 port->port_num, port->vcpi.vcpi, 2819 port->vcpi.num_slots, 2820 (*name != 0) ? name : "Unknown"); 2821 } else 2822 seq_printf(m, "vcpi %d:unused\n", i); 2823 } 2824 for (i = 0; i < mgr->max_payloads; i++) { 2825 seq_printf(m, "payload %d: %d, %d, %d\n", 2826 i, 2827 mgr->payloads[i].payload_state, 2828 mgr->payloads[i].start_slot, 2829 mgr->payloads[i].num_slots); 2830 2831 2832 } 2833 mutex_unlock(&mgr->payload_lock); 2834 2835 mutex_lock(&mgr->lock); 2836 if (mgr->mst_primary) { 2837 u8 buf[64]; 2838 bool bret; 2839 int ret; 2840 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE); 2841 seq_printf(m, "dpcd: "); 2842 for (i = 0; i < DP_RECEIVER_CAP_SIZE; i++) 2843 seq_printf(m, "%02x ", buf[i]); 2844 seq_printf(m, "\n"); 2845 ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2); 2846 seq_printf(m, "faux/mst: "); 2847 for (i = 0; i < 2; i++) 2848 seq_printf(m, "%02x ", buf[i]); 2849 seq_printf(m, "\n"); 2850 ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1); 2851 seq_printf(m, "mst ctrl: "); 2852 for (i = 0; i < 1; i++) 2853 seq_printf(m, "%02x ", buf[i]); 2854 seq_printf(m, "\n"); 2855 2856 /* dump the standard OUI branch header */ 2857 ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE); 2858 seq_printf(m, "branch oui: "); 2859 for (i = 0; i < 0x3; i++) 2860 seq_printf(m, "%02x", buf[i]); 2861 seq_printf(m, " devid: "); 2862 for (i = 0x3; i < 0x8 && buf[i]; i++) 2863 seq_printf(m, "%c", buf[i]); 2864 2865 seq_printf(m, " revision: hw: %x.%x sw: %x.%x", buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]); 2866 seq_printf(m, "\n"); 2867 bret = dump_dp_payload_table(mgr, buf); 2868 if (bret == true) { 2869 seq_printf(m, "payload table: "); 2870 for (i = 0; i < 63; i++) 2871 seq_printf(m, "%02x ", buf[i]); 2872 seq_printf(m, "\n"); 2873 } 2874 2875 } 2876 2877 mutex_unlock(&mgr->lock); 2878 2879} 2880EXPORT_SYMBOL(drm_dp_mst_dump_topology); 2881 2882static void drm_dp_tx_work(struct work_struct *work) 2883{ 2884 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work); 2885 2886 mutex_lock(&mgr->qlock); 2887 if (!list_empty(&mgr->tx_msg_downq)) 2888 process_single_down_tx_qlock(mgr); 2889 mutex_unlock(&mgr->qlock); 2890} 2891 2892static void drm_dp_free_mst_port(struct kref *kref) 2893{ 2894 struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref); 2895 kref_put(&port->parent->kref, drm_dp_free_mst_branch_device); 2896 kfree(port); 2897} 2898 2899static void drm_dp_destroy_connector_work(struct work_struct *work) 2900{ 2901 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work); 2902 struct drm_dp_mst_port *port; 2903 bool send_hotplug = false; 2904 /* 2905 * Not a regular list traverse as we have to drop the destroy 2906 * connector lock before destroying the connector, to avoid AB->BA 2907 * ordering between this lock and the config mutex. 2908 */ 2909 for (;;) { 2910 mutex_lock(&mgr->destroy_connector_lock); 2911 port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next); 2912 if (!port) { 2913 mutex_unlock(&mgr->destroy_connector_lock); 2914 break; 2915 } 2916 list_del(&port->next); 2917 mutex_unlock(&mgr->destroy_connector_lock); 2918 2919 kref_init(&port->kref); 2920 INIT_LIST_HEAD(&port->next); 2921 2922 mgr->cbs->destroy_connector(mgr, port->connector); 2923 2924 drm_dp_port_teardown_pdt(port, port->pdt); 2925 port->pdt = DP_PEER_DEVICE_NONE; 2926 2927 if (!port->input && port->vcpi.vcpi > 0) { 2928 drm_dp_mst_reset_vcpi_slots(mgr, port); 2929 drm_dp_update_payload_part1(mgr); 2930 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); 2931 } 2932 2933 kref_put(&port->kref, drm_dp_free_mst_port); 2934 send_hotplug = true; 2935 } 2936 if (send_hotplug) 2937 (*mgr->cbs->hotplug)(mgr); 2938} 2939 2940/** 2941 * drm_dp_mst_topology_mgr_init - initialise a topology manager 2942 * @mgr: manager struct to initialise 2943 * @dev: device providing this structure - for i2c addition. 2944 * @aux: DP helper aux channel to talk to this device 2945 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit 2946 * @max_payloads: maximum number of payloads this GPU can source 2947 * @conn_base_id: the connector object ID the MST device is connected to. 2948 * 2949 * Return 0 for success, or negative error code on failure 2950 */ 2951int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, 2952 struct drm_device *dev, struct drm_dp_aux *aux, 2953 int max_dpcd_transaction_bytes, 2954 int max_payloads, int conn_base_id) 2955{ 2956 mutex_init(&mgr->lock); 2957 mutex_init(&mgr->qlock); 2958 mutex_init(&mgr->payload_lock); 2959 mutex_init(&mgr->destroy_connector_lock); 2960 INIT_LIST_HEAD(&mgr->tx_msg_downq); 2961 INIT_LIST_HEAD(&mgr->destroy_connector_list); 2962 INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work); 2963 INIT_WORK(&mgr->tx_work, drm_dp_tx_work); 2964 INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work); 2965 init_waitqueue_head(&mgr->tx_waitq); 2966 mgr->dev = dev; 2967 mgr->aux = aux; 2968 mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes; 2969 mgr->max_payloads = max_payloads; 2970 mgr->conn_base_id = conn_base_id; 2971 if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 || 2972 max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8) 2973 return -EINVAL; 2974 mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL); 2975 if (!mgr->payloads) 2976 return -ENOMEM; 2977 mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL); 2978 if (!mgr->proposed_vcpis) 2979 return -ENOMEM; 2980 set_bit(0, &mgr->payload_mask); 2981 if (test_calc_pbn_mode() < 0) 2982 DRM_ERROR("MST PBN self-test failed\n"); 2983 2984 return 0; 2985} 2986EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init); 2987 2988/** 2989 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager. 2990 * @mgr: manager to destroy 2991 */ 2992void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr) 2993{ 2994 flush_work(&mgr->work); 2995 flush_work(&mgr->destroy_connector_work); 2996 mutex_lock(&mgr->payload_lock); 2997 kfree(mgr->payloads); 2998 mgr->payloads = NULL; 2999 kfree(mgr->proposed_vcpis); 3000 mgr->proposed_vcpis = NULL; 3001 mutex_unlock(&mgr->payload_lock); 3002 mgr->dev = NULL; 3003 mgr->aux = NULL; 3004} 3005EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy); 3006 3007/* I2C device */ 3008static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, 3009 int num) 3010{ 3011 struct drm_dp_aux *aux = adapter->algo_data; 3012 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux); 3013 struct drm_dp_mst_branch *mstb; 3014 struct drm_dp_mst_topology_mgr *mgr = port->mgr; 3015 unsigned int i; 3016 bool reading = false; 3017 struct drm_dp_sideband_msg_req_body msg; 3018 struct drm_dp_sideband_msg_tx *txmsg = NULL; 3019 int ret; 3020 3021 mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); 3022 if (!mstb) 3023 return -EREMOTEIO; 3024 3025 /* construct i2c msg */ 3026 /* see if last msg is a read */ 3027 if (msgs[num - 1].flags & I2C_M_RD) 3028 reading = true; 3029 3030 if (!reading || (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)) { 3031 DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n"); 3032 ret = -EIO; 3033 goto out; 3034 } 3035 3036 memset(&msg, 0, sizeof(msg)); 3037 msg.req_type = DP_REMOTE_I2C_READ; 3038 msg.u.i2c_read.num_transactions = num - 1; 3039 msg.u.i2c_read.port_number = port->port_num; 3040 for (i = 0; i < num - 1; i++) { 3041 msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr; 3042 msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len; 3043 msg.u.i2c_read.transactions[i].bytes = msgs[i].buf; 3044 } 3045 msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr; 3046 msg.u.i2c_read.num_bytes_read = msgs[num - 1].len; 3047 3048 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 3049 if (!txmsg) { 3050 ret = -ENOMEM; 3051 goto out; 3052 } 3053 3054 txmsg->dst = mstb; 3055 drm_dp_encode_sideband_req(&msg, txmsg); 3056 3057 drm_dp_queue_down_tx(mgr, txmsg); 3058 3059 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 3060 if (ret > 0) { 3061 3062 if (txmsg->reply.reply_type == 1) { /* got a NAK back */ 3063 ret = -EREMOTEIO; 3064 goto out; 3065 } 3066 if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) { 3067 ret = -EIO; 3068 goto out; 3069 } 3070 memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len); 3071 ret = num; 3072 } 3073out: 3074 kfree(txmsg); 3075 drm_dp_put_mst_branch_device(mstb); 3076 return ret; 3077} 3078 3079static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter) 3080{ 3081 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | 3082 I2C_FUNC_SMBUS_READ_BLOCK_DATA | 3083 I2C_FUNC_SMBUS_BLOCK_PROC_CALL | 3084 I2C_FUNC_10BIT_ADDR; 3085} 3086 3087static const struct i2c_algorithm drm_dp_mst_i2c_algo = { 3088 .functionality = drm_dp_mst_i2c_functionality, 3089 .master_xfer = drm_dp_mst_i2c_xfer, 3090}; 3091 3092/** 3093 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX 3094 * @aux: DisplayPort AUX channel 3095 * 3096 * Returns 0 on success or a negative error code on failure. 3097 */ 3098static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux) 3099{ 3100 aux->ddc.algo = &drm_dp_mst_i2c_algo; 3101 aux->ddc.algo_data = aux; 3102 aux->ddc.retries = 3; 3103 3104 aux->ddc.class = I2C_CLASS_DDC; 3105 aux->ddc.owner = THIS_MODULE; 3106 aux->ddc.dev.parent = aux->dev; 3107 aux->ddc.dev.of_node = aux->dev->of_node; 3108 3109 strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev), 3110 sizeof(aux->ddc.name)); 3111 3112 return i2c_add_adapter(&aux->ddc); 3113} 3114 3115/** 3116 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter 3117 * @aux: DisplayPort AUX channel 3118 */ 3119static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux) 3120{ 3121 i2c_del_adapter(&aux->ddc); 3122}