Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v4.8-rc2 (3118 lines, 86 kB)
/*
 * Copyright © 2014 Red Hat
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission. The copyright holders make no representations
 * about the suitability of this software for any purpose. It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/i2c.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drmP.h>

#include <drm/drm_fixed.h>

/**
 * DOC: dp mst helper
 *
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and received of sideband msgs.
 */
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);
static int test_calc_pbn_mode(void);

static void drm_dp_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id,
				     struct drm_dp_payload *payload);

static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
/* sideband msg handling */
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = num_nibbles * 4;
	u8 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x10) == 0x10)
			remainder ^= 0x13;
	}

	number_of_bits = 4;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x10) != 0)
			remainder ^= 0x13;
	}

	return remainder;
}

static u8
drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes) 103{ 104 u8 bitmask = 0x80; 105 u8 bitshift = 7; 106 u8 array_index = 0; 107 int number_of_bits = number_of_bytes * 8; 108 u16 remainder = 0; 109 110 while (number_of_bits != 0) { 111 number_of_bits--; 112 remainder <<= 1; 113 remainder |= (data[array_index] & bitmask) >> bitshift; 114 bitmask >>= 1; 115 bitshift--; 116 if (bitmask == 0) { 117 bitmask = 0x80; 118 bitshift = 7; 119 array_index++; 120 } 121 if ((remainder & 0x100) == 0x100) 122 remainder ^= 0xd5; 123 } 124 125 number_of_bits = 8; 126 while (number_of_bits != 0) { 127 number_of_bits--; 128 remainder <<= 1; 129 if ((remainder & 0x100) != 0) 130 remainder ^= 0xd5; 131 } 132 133 return remainder & 0xff; 134} 135static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr) 136{ 137 u8 size = 3; 138 size += (hdr->lct / 2); 139 return size; 140} 141 142static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr, 143 u8 *buf, int *len) 144{ 145 int idx = 0; 146 int i; 147 u8 crc4; 148 buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf); 149 for (i = 0; i < (hdr->lct / 2); i++) 150 buf[idx++] = hdr->rad[i]; 151 buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) | 152 (hdr->msg_len & 0x3f); 153 buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4); 154 155 crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1); 156 buf[idx - 1] |= (crc4 & 0xf); 157 158 *len = idx; 159} 160 161static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr, 162 u8 *buf, int buflen, u8 *hdrlen) 163{ 164 u8 crc4; 165 u8 len; 166 int i; 167 u8 idx; 168 if (buf[0] == 0) 169 return false; 170 len = 3; 171 len += ((buf[0] & 0xf0) >> 4) / 2; 172 if (len > buflen) 173 return false; 174 crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1); 175 176 if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) { 177 DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]); 178 return false; 179 } 180 181 hdr->lct = (buf[0] & 0xf0) >> 4; 182 hdr->lcr = (buf[0] & 0xf); 183 idx = 1; 184 for (i = 0; i < (hdr->lct / 2); i++) 185 hdr->rad[i] = buf[idx++]; 186 hdr->broadcast = (buf[idx] >> 7) & 0x1; 187 hdr->path_msg = (buf[idx] >> 6) & 0x1; 188 hdr->msg_len = buf[idx] & 0x3f; 189 idx++; 190 hdr->somt = (buf[idx] >> 7) & 0x1; 191 hdr->eomt = (buf[idx] >> 6) & 0x1; 192 hdr->seqno = (buf[idx] >> 4) & 0x1; 193 idx++; 194 *hdrlen = idx; 195 return true; 196} 197 198static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req, 199 struct drm_dp_sideband_msg_tx *raw) 200{ 201 int idx = 0; 202 int i; 203 u8 *buf = raw->msg; 204 buf[idx++] = req->req_type & 0x7f; 205 206 switch (req->req_type) { 207 case DP_ENUM_PATH_RESOURCES: 208 buf[idx] = (req->u.port_num.port_number & 0xf) << 4; 209 idx++; 210 break; 211 case DP_ALLOCATE_PAYLOAD: 212 buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 | 213 (req->u.allocate_payload.number_sdp_streams & 0xf); 214 idx++; 215 buf[idx] = (req->u.allocate_payload.vcpi & 0x7f); 216 idx++; 217 buf[idx] = (req->u.allocate_payload.pbn >> 8); 218 idx++; 219 buf[idx] = (req->u.allocate_payload.pbn & 0xff); 220 idx++; 221 for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) { 222 buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) | 223 (req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf); 224 idx++; 225 } 226 if (req->u.allocate_payload.number_sdp_streams & 1) { 227 i = req->u.allocate_payload.number_sdp_streams - 1; 228 buf[idx] = 
(req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4; 229 idx++; 230 } 231 break; 232 case DP_QUERY_PAYLOAD: 233 buf[idx] = (req->u.query_payload.port_number & 0xf) << 4; 234 idx++; 235 buf[idx] = (req->u.query_payload.vcpi & 0x7f); 236 idx++; 237 break; 238 case DP_REMOTE_DPCD_READ: 239 buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4; 240 buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf; 241 idx++; 242 buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8; 243 idx++; 244 buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff); 245 idx++; 246 buf[idx] = (req->u.dpcd_read.num_bytes); 247 idx++; 248 break; 249 250 case DP_REMOTE_DPCD_WRITE: 251 buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4; 252 buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf; 253 idx++; 254 buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8; 255 idx++; 256 buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff); 257 idx++; 258 buf[idx] = (req->u.dpcd_write.num_bytes); 259 idx++; 260 memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes); 261 idx += req->u.dpcd_write.num_bytes; 262 break; 263 case DP_REMOTE_I2C_READ: 264 buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4; 265 buf[idx] |= (req->u.i2c_read.num_transactions & 0x3); 266 idx++; 267 for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) { 268 buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f; 269 idx++; 270 buf[idx] = req->u.i2c_read.transactions[i].num_bytes; 271 idx++; 272 memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes); 273 idx += req->u.i2c_read.transactions[i].num_bytes; 274 275 buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5; 276 buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf); 277 idx++; 278 } 279 buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f; 280 idx++; 281 buf[idx] = (req->u.i2c_read.num_bytes_read); 282 idx++; 283 break; 284 285 case DP_REMOTE_I2C_WRITE: 286 buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4; 287 idx++; 288 buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f; 289 idx++; 290 buf[idx] = (req->u.i2c_write.num_bytes); 291 idx++; 292 memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes); 293 idx += req->u.i2c_write.num_bytes; 294 break; 295 } 296 raw->cur_len = idx; 297} 298 299static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len) 300{ 301 u8 crc4; 302 crc4 = drm_dp_msg_data_crc4(msg, len); 303 msg[len] = crc4; 304} 305 306static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep, 307 struct drm_dp_sideband_msg_tx *raw) 308{ 309 int idx = 0; 310 u8 *buf = raw->msg; 311 312 buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f); 313 314 raw->cur_len = idx; 315} 316 317/* this adds a chunk of msg to the builder to get the final msg */ 318static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg, 319 u8 *replybuf, u8 replybuflen, bool hdr) 320{ 321 int ret; 322 u8 crc4; 323 324 if (hdr) { 325 u8 hdrlen; 326 struct drm_dp_sideband_msg_hdr recv_hdr; 327 ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen); 328 if (ret == false) { 329 print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false); 330 return false; 331 } 332 333 /* get length contained in this portion */ 334 msg->curchunk_len = recv_hdr.msg_len; 335 msg->curchunk_hdrlen = hdrlen; 336 337 /* we have already gotten an somt - don't bother parsing */ 
338 if (recv_hdr.somt && msg->have_somt) 339 return false; 340 341 if (recv_hdr.somt) { 342 memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr)); 343 msg->have_somt = true; 344 } 345 if (recv_hdr.eomt) 346 msg->have_eomt = true; 347 348 /* copy the bytes for the remainder of this header chunk */ 349 msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen)); 350 memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx); 351 } else { 352 memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen); 353 msg->curchunk_idx += replybuflen; 354 } 355 356 if (msg->curchunk_idx >= msg->curchunk_len) { 357 /* do CRC */ 358 crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1); 359 /* copy chunk into bigger msg */ 360 memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1); 361 msg->curlen += msg->curchunk_len - 1; 362 } 363 return true; 364} 365 366static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw, 367 struct drm_dp_sideband_msg_reply_body *repmsg) 368{ 369 int idx = 1; 370 int i; 371 memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16); 372 idx += 16; 373 repmsg->u.link_addr.nports = raw->msg[idx] & 0xf; 374 idx++; 375 if (idx > raw->curlen) 376 goto fail_len; 377 for (i = 0; i < repmsg->u.link_addr.nports; i++) { 378 if (raw->msg[idx] & 0x80) 379 repmsg->u.link_addr.ports[i].input_port = 1; 380 381 repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7; 382 repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf); 383 384 idx++; 385 if (idx > raw->curlen) 386 goto fail_len; 387 repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1; 388 repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1; 389 if (repmsg->u.link_addr.ports[i].input_port == 0) 390 repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1; 391 idx++; 392 if (idx > raw->curlen) 393 goto fail_len; 394 if (repmsg->u.link_addr.ports[i].input_port == 0) { 395 repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]); 396 idx++; 397 if (idx > raw->curlen) 398 goto fail_len; 399 memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16); 400 idx += 16; 401 if (idx > raw->curlen) 402 goto fail_len; 403 repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf; 404 repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf); 405 idx++; 406 407 } 408 if (idx > raw->curlen) 409 goto fail_len; 410 } 411 412 return true; 413fail_len: 414 DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen); 415 return false; 416} 417 418static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw, 419 struct drm_dp_sideband_msg_reply_body *repmsg) 420{ 421 int idx = 1; 422 repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf; 423 idx++; 424 if (idx > raw->curlen) 425 goto fail_len; 426 repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx]; 427 if (idx > raw->curlen) 428 goto fail_len; 429 430 memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes); 431 return true; 432fail_len: 433 DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen); 434 return false; 435} 436 437static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw, 438 struct drm_dp_sideband_msg_reply_body *repmsg) 439{ 440 int idx = 1; 441 repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf; 442 idx++; 443 if (idx > 
raw->curlen) 444 goto fail_len; 445 return true; 446fail_len: 447 DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen); 448 return false; 449} 450 451static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw, 452 struct drm_dp_sideband_msg_reply_body *repmsg) 453{ 454 int idx = 1; 455 456 repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf); 457 idx++; 458 if (idx > raw->curlen) 459 goto fail_len; 460 repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx]; 461 idx++; 462 /* TODO check */ 463 memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes); 464 return true; 465fail_len: 466 DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen); 467 return false; 468} 469 470static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw, 471 struct drm_dp_sideband_msg_reply_body *repmsg) 472{ 473 int idx = 1; 474 repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf; 475 idx++; 476 if (idx > raw->curlen) 477 goto fail_len; 478 repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]); 479 idx += 2; 480 if (idx > raw->curlen) 481 goto fail_len; 482 repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]); 483 idx += 2; 484 if (idx > raw->curlen) 485 goto fail_len; 486 return true; 487fail_len: 488 DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen); 489 return false; 490} 491 492static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw, 493 struct drm_dp_sideband_msg_reply_body *repmsg) 494{ 495 int idx = 1; 496 repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf; 497 idx++; 498 if (idx > raw->curlen) 499 goto fail_len; 500 repmsg->u.allocate_payload.vcpi = raw->msg[idx]; 501 idx++; 502 if (idx > raw->curlen) 503 goto fail_len; 504 repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]); 505 idx += 2; 506 if (idx > raw->curlen) 507 goto fail_len; 508 return true; 509fail_len: 510 DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen); 511 return false; 512} 513 514static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw, 515 struct drm_dp_sideband_msg_reply_body *repmsg) 516{ 517 int idx = 1; 518 repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf; 519 idx++; 520 if (idx > raw->curlen) 521 goto fail_len; 522 repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]); 523 idx += 2; 524 if (idx > raw->curlen) 525 goto fail_len; 526 return true; 527fail_len: 528 DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen); 529 return false; 530} 531 532static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw, 533 struct drm_dp_sideband_msg_reply_body *msg) 534{ 535 memset(msg, 0, sizeof(*msg)); 536 msg->reply_type = (raw->msg[0] & 0x80) >> 7; 537 msg->req_type = (raw->msg[0] & 0x7f); 538 539 if (msg->reply_type) { 540 memcpy(msg->u.nak.guid, &raw->msg[1], 16); 541 msg->u.nak.reason = raw->msg[17]; 542 msg->u.nak.nak_data = raw->msg[18]; 543 return false; 544 } 545 546 switch (msg->req_type) { 547 case DP_LINK_ADDRESS: 548 return drm_dp_sideband_parse_link_address(raw, msg); 549 case DP_QUERY_PAYLOAD: 550 return drm_dp_sideband_parse_query_payload_ack(raw, msg); 551 case DP_REMOTE_DPCD_READ: 552 return drm_dp_sideband_parse_remote_dpcd_read(raw, msg); 553 case 
DP_REMOTE_DPCD_WRITE: 554 return drm_dp_sideband_parse_remote_dpcd_write(raw, msg); 555 case DP_REMOTE_I2C_READ: 556 return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg); 557 case DP_ENUM_PATH_RESOURCES: 558 return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg); 559 case DP_ALLOCATE_PAYLOAD: 560 return drm_dp_sideband_parse_allocate_payload_ack(raw, msg); 561 default: 562 DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type); 563 return false; 564 } 565} 566 567static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw, 568 struct drm_dp_sideband_msg_req_body *msg) 569{ 570 int idx = 1; 571 572 msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4; 573 idx++; 574 if (idx > raw->curlen) 575 goto fail_len; 576 577 memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16); 578 idx += 16; 579 if (idx > raw->curlen) 580 goto fail_len; 581 582 msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1; 583 msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1; 584 msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1; 585 msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1; 586 msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7); 587 idx++; 588 return true; 589fail_len: 590 DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen); 591 return false; 592} 593 594static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw, 595 struct drm_dp_sideband_msg_req_body *msg) 596{ 597 int idx = 1; 598 599 msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4; 600 idx++; 601 if (idx > raw->curlen) 602 goto fail_len; 603 604 memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16); 605 idx += 16; 606 if (idx > raw->curlen) 607 goto fail_len; 608 609 msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]); 610 idx++; 611 return true; 612fail_len: 613 DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen); 614 return false; 615} 616 617static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw, 618 struct drm_dp_sideband_msg_req_body *msg) 619{ 620 memset(msg, 0, sizeof(*msg)); 621 msg->req_type = (raw->msg[0] & 0x7f); 622 623 switch (msg->req_type) { 624 case DP_CONNECTION_STATUS_NOTIFY: 625 return drm_dp_sideband_parse_connection_status_notify(raw, msg); 626 case DP_RESOURCE_STATUS_NOTIFY: 627 return drm_dp_sideband_parse_resource_status_notify(raw, msg); 628 default: 629 DRM_ERROR("Got unknown request 0x%02x\n", msg->req_type); 630 return false; 631 } 632} 633 634static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes) 635{ 636 struct drm_dp_sideband_msg_req_body req; 637 638 req.req_type = DP_REMOTE_DPCD_WRITE; 639 req.u.dpcd_write.port_number = port_num; 640 req.u.dpcd_write.dpcd_address = offset; 641 req.u.dpcd_write.num_bytes = num_bytes; 642 req.u.dpcd_write.bytes = bytes; 643 drm_dp_encode_sideband_req(&req, msg); 644 645 return 0; 646} 647 648static int build_link_address(struct drm_dp_sideband_msg_tx *msg) 649{ 650 struct drm_dp_sideband_msg_req_body req; 651 652 req.req_type = DP_LINK_ADDRESS; 653 drm_dp_encode_sideband_req(&req, msg); 654 return 0; 655} 656 657static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num) 658{ 659 struct drm_dp_sideband_msg_req_body req; 660 661 req.req_type = DP_ENUM_PATH_RESOURCES; 662 req.u.port_num.port_number = port_num; 663 
drm_dp_encode_sideband_req(&req, msg); 664 msg->path_msg = true; 665 return 0; 666} 667 668static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num, 669 u8 vcpi, uint16_t pbn, 670 u8 number_sdp_streams, 671 u8 *sdp_stream_sink) 672{ 673 struct drm_dp_sideband_msg_req_body req; 674 memset(&req, 0, sizeof(req)); 675 req.req_type = DP_ALLOCATE_PAYLOAD; 676 req.u.allocate_payload.port_number = port_num; 677 req.u.allocate_payload.vcpi = vcpi; 678 req.u.allocate_payload.pbn = pbn; 679 req.u.allocate_payload.number_sdp_streams = number_sdp_streams; 680 memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink, 681 number_sdp_streams); 682 drm_dp_encode_sideband_req(&req, msg); 683 msg->path_msg = true; 684 return 0; 685} 686 687static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr, 688 struct drm_dp_vcpi *vcpi) 689{ 690 int ret, vcpi_ret; 691 692 mutex_lock(&mgr->payload_lock); 693 ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1); 694 if (ret > mgr->max_payloads) { 695 ret = -EINVAL; 696 DRM_DEBUG_KMS("out of payload ids %d\n", ret); 697 goto out_unlock; 698 } 699 700 vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1); 701 if (vcpi_ret > mgr->max_payloads) { 702 ret = -EINVAL; 703 DRM_DEBUG_KMS("out of vcpi ids %d\n", ret); 704 goto out_unlock; 705 } 706 707 set_bit(ret, &mgr->payload_mask); 708 set_bit(vcpi_ret, &mgr->vcpi_mask); 709 vcpi->vcpi = vcpi_ret + 1; 710 mgr->proposed_vcpis[ret - 1] = vcpi; 711out_unlock: 712 mutex_unlock(&mgr->payload_lock); 713 return ret; 714} 715 716static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr, 717 int vcpi) 718{ 719 int i; 720 if (vcpi == 0) 721 return; 722 723 mutex_lock(&mgr->payload_lock); 724 DRM_DEBUG_KMS("putting payload %d\n", vcpi); 725 clear_bit(vcpi - 1, &mgr->vcpi_mask); 726 727 for (i = 0; i < mgr->max_payloads; i++) { 728 if (mgr->proposed_vcpis[i]) 729 if (mgr->proposed_vcpis[i]->vcpi == vcpi) { 730 mgr->proposed_vcpis[i] = NULL; 731 clear_bit(i + 1, &mgr->payload_mask); 732 } 733 } 734 mutex_unlock(&mgr->payload_lock); 735} 736 737static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr, 738 struct drm_dp_sideband_msg_tx *txmsg) 739{ 740 bool ret; 741 742 /* 743 * All updates to txmsg->state are protected by mgr->qlock, and the two 744 * cases we check here are terminal states. For those the barriers 745 * provided by the wake_up/wait_event pair are enough. 
746 */ 747 ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX || 748 txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT); 749 return ret; 750} 751 752static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb, 753 struct drm_dp_sideband_msg_tx *txmsg) 754{ 755 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; 756 int ret; 757 758 ret = wait_event_timeout(mgr->tx_waitq, 759 check_txmsg_state(mgr, txmsg), 760 (4 * HZ)); 761 mutex_lock(&mstb->mgr->qlock); 762 if (ret > 0) { 763 if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) { 764 ret = -EIO; 765 goto out; 766 } 767 } else { 768 DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno); 769 770 /* dump some state */ 771 ret = -EIO; 772 773 /* remove from q */ 774 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED || 775 txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) { 776 list_del(&txmsg->next); 777 } 778 779 if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND || 780 txmsg->state == DRM_DP_SIDEBAND_TX_SENT) { 781 mstb->tx_slots[txmsg->seqno] = NULL; 782 } 783 } 784out: 785 mutex_unlock(&mgr->qlock); 786 787 return ret; 788} 789 790static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad) 791{ 792 struct drm_dp_mst_branch *mstb; 793 794 mstb = kzalloc(sizeof(*mstb), GFP_KERNEL); 795 if (!mstb) 796 return NULL; 797 798 mstb->lct = lct; 799 if (lct > 1) 800 memcpy(mstb->rad, rad, lct / 2); 801 INIT_LIST_HEAD(&mstb->ports); 802 kref_init(&mstb->kref); 803 return mstb; 804} 805 806static void drm_dp_free_mst_port(struct kref *kref); 807 808static void drm_dp_free_mst_branch_device(struct kref *kref) 809{ 810 struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref); 811 if (mstb->port_parent) { 812 if (list_empty(&mstb->port_parent->next)) 813 kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port); 814 } 815 kfree(mstb); 816} 817 818static void drm_dp_destroy_mst_branch_device(struct kref *kref) 819{ 820 struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref); 821 struct drm_dp_mst_port *port, *tmp; 822 bool wake_tx = false; 823 824 /* 825 * init kref again to be used by ports to remove mst branch when it is 826 * not needed anymore 827 */ 828 kref_init(kref); 829 830 if (mstb->port_parent && list_empty(&mstb->port_parent->next)) 831 kref_get(&mstb->port_parent->kref); 832 833 /* 834 * destroy all ports - don't need lock 835 * as there are no more references to the mst branch 836 * device at this point. 
837 */ 838 list_for_each_entry_safe(port, tmp, &mstb->ports, next) { 839 list_del(&port->next); 840 drm_dp_put_port(port); 841 } 842 843 /* drop any tx slots msg */ 844 mutex_lock(&mstb->mgr->qlock); 845 if (mstb->tx_slots[0]) { 846 mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT; 847 mstb->tx_slots[0] = NULL; 848 wake_tx = true; 849 } 850 if (mstb->tx_slots[1]) { 851 mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT; 852 mstb->tx_slots[1] = NULL; 853 wake_tx = true; 854 } 855 mutex_unlock(&mstb->mgr->qlock); 856 857 if (wake_tx) 858 wake_up(&mstb->mgr->tx_waitq); 859 860 kref_put(kref, drm_dp_free_mst_branch_device); 861} 862 863static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb) 864{ 865 kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device); 866} 867 868 869static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt) 870{ 871 struct drm_dp_mst_branch *mstb; 872 873 switch (old_pdt) { 874 case DP_PEER_DEVICE_DP_LEGACY_CONV: 875 case DP_PEER_DEVICE_SST_SINK: 876 /* remove i2c over sideband */ 877 drm_dp_mst_unregister_i2c_bus(&port->aux); 878 break; 879 case DP_PEER_DEVICE_MST_BRANCHING: 880 mstb = port->mstb; 881 port->mstb = NULL; 882 drm_dp_put_mst_branch_device(mstb); 883 break; 884 } 885} 886 887static void drm_dp_destroy_port(struct kref *kref) 888{ 889 struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref); 890 struct drm_dp_mst_topology_mgr *mgr = port->mgr; 891 892 if (!port->input) { 893 port->vcpi.num_slots = 0; 894 895 kfree(port->cached_edid); 896 897 /* 898 * The only time we don't have a connector 899 * on an output port is if the connector init 900 * fails. 901 */ 902 if (port->connector) { 903 /* we can't destroy the connector here, as 904 * we might be holding the mode_config.mutex 905 * from an EDID retrieval */ 906 907 mutex_lock(&mgr->destroy_connector_lock); 908 kref_get(&port->parent->kref); 909 list_add(&port->next, &mgr->destroy_connector_list); 910 mutex_unlock(&mgr->destroy_connector_lock); 911 schedule_work(&mgr->destroy_connector_work); 912 return; 913 } 914 /* no need to clean up vcpi 915 * as if we have no connector we never setup a vcpi */ 916 drm_dp_port_teardown_pdt(port, port->pdt); 917 } 918 kfree(port); 919} 920 921static void drm_dp_put_port(struct drm_dp_mst_port *port) 922{ 923 kref_put(&port->kref, drm_dp_destroy_port); 924} 925 926static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find) 927{ 928 struct drm_dp_mst_port *port; 929 struct drm_dp_mst_branch *rmstb; 930 if (to_find == mstb) { 931 kref_get(&mstb->kref); 932 return mstb; 933 } 934 list_for_each_entry(port, &mstb->ports, next) { 935 if (port->mstb) { 936 rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find); 937 if (rmstb) 938 return rmstb; 939 } 940 } 941 return NULL; 942} 943 944static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb) 945{ 946 struct drm_dp_mst_branch *rmstb = NULL; 947 mutex_lock(&mgr->lock); 948 if (mgr->mst_primary) 949 rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb); 950 mutex_unlock(&mgr->lock); 951 return rmstb; 952} 953 954static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find) 955{ 956 struct drm_dp_mst_port *port, *mport; 957 958 list_for_each_entry(port, &mstb->ports, next) { 959 if (port == to_find) { 960 
kref_get(&port->kref); 961 return port; 962 } 963 if (port->mstb) { 964 mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find); 965 if (mport) 966 return mport; 967 } 968 } 969 return NULL; 970} 971 972static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) 973{ 974 struct drm_dp_mst_port *rport = NULL; 975 mutex_lock(&mgr->lock); 976 if (mgr->mst_primary) 977 rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port); 978 mutex_unlock(&mgr->lock); 979 return rport; 980} 981 982static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num) 983{ 984 struct drm_dp_mst_port *port; 985 986 list_for_each_entry(port, &mstb->ports, next) { 987 if (port->port_num == port_num) { 988 kref_get(&port->kref); 989 return port; 990 } 991 } 992 993 return NULL; 994} 995 996/* 997 * calculate a new RAD for this MST branch device 998 * if parent has an LCT of 2 then it has 1 nibble of RAD, 999 * if parent has an LCT of 3 then it has 2 nibbles of RAD, 1000 */ 1001static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port, 1002 u8 *rad) 1003{ 1004 int parent_lct = port->parent->lct; 1005 int shift = 4; 1006 int idx = (parent_lct - 1) / 2; 1007 if (parent_lct > 1) { 1008 memcpy(rad, port->parent->rad, idx + 1); 1009 shift = (parent_lct % 2) ? 4 : 0; 1010 } else 1011 rad[0] = 0; 1012 1013 rad[idx] |= port->port_num << shift; 1014 return parent_lct + 1; 1015} 1016 1017/* 1018 * return sends link address for new mstb 1019 */ 1020static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port) 1021{ 1022 int ret; 1023 u8 rad[6], lct; 1024 bool send_link = false; 1025 switch (port->pdt) { 1026 case DP_PEER_DEVICE_DP_LEGACY_CONV: 1027 case DP_PEER_DEVICE_SST_SINK: 1028 /* add i2c over sideband */ 1029 ret = drm_dp_mst_register_i2c_bus(&port->aux); 1030 break; 1031 case DP_PEER_DEVICE_MST_BRANCHING: 1032 lct = drm_dp_calculate_rad(port, rad); 1033 1034 port->mstb = drm_dp_add_mst_branch_device(lct, rad); 1035 port->mstb->mgr = port->mgr; 1036 port->mstb->port_parent = port; 1037 1038 send_link = true; 1039 break; 1040 } 1041 return send_link; 1042} 1043 1044static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid) 1045{ 1046 int ret; 1047 1048 memcpy(mstb->guid, guid, 16); 1049 1050 if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) { 1051 if (mstb->port_parent) { 1052 ret = drm_dp_send_dpcd_write( 1053 mstb->mgr, 1054 mstb->port_parent, 1055 DP_GUID, 1056 16, 1057 mstb->guid); 1058 } else { 1059 1060 ret = drm_dp_dpcd_write( 1061 mstb->mgr->aux, 1062 DP_GUID, 1063 mstb->guid, 1064 16); 1065 } 1066 } 1067} 1068 1069static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb, 1070 int pnum, 1071 char *proppath, 1072 size_t proppath_size) 1073{ 1074 int i; 1075 char temp[8]; 1076 snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id); 1077 for (i = 0; i < (mstb->lct - 1); i++) { 1078 int shift = (i % 2) ? 
0 : 4; 1079 int port_num = (mstb->rad[i / 2] >> shift) & 0xf; 1080 snprintf(temp, sizeof(temp), "-%d", port_num); 1081 strlcat(proppath, temp, proppath_size); 1082 } 1083 snprintf(temp, sizeof(temp), "-%d", pnum); 1084 strlcat(proppath, temp, proppath_size); 1085} 1086 1087static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, 1088 struct device *dev, 1089 struct drm_dp_link_addr_reply_port *port_msg) 1090{ 1091 struct drm_dp_mst_port *port; 1092 bool ret; 1093 bool created = false; 1094 int old_pdt = 0; 1095 int old_ddps = 0; 1096 port = drm_dp_get_port(mstb, port_msg->port_number); 1097 if (!port) { 1098 port = kzalloc(sizeof(*port), GFP_KERNEL); 1099 if (!port) 1100 return; 1101 kref_init(&port->kref); 1102 port->parent = mstb; 1103 port->port_num = port_msg->port_number; 1104 port->mgr = mstb->mgr; 1105 port->aux.name = "DPMST"; 1106 port->aux.dev = dev; 1107 created = true; 1108 } else { 1109 old_pdt = port->pdt; 1110 old_ddps = port->ddps; 1111 } 1112 1113 port->pdt = port_msg->peer_device_type; 1114 port->input = port_msg->input_port; 1115 port->mcs = port_msg->mcs; 1116 port->ddps = port_msg->ddps; 1117 port->ldps = port_msg->legacy_device_plug_status; 1118 port->dpcd_rev = port_msg->dpcd_revision; 1119 port->num_sdp_streams = port_msg->num_sdp_streams; 1120 port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks; 1121 1122 /* manage mstb port lists with mgr lock - take a reference 1123 for this list */ 1124 if (created) { 1125 mutex_lock(&mstb->mgr->lock); 1126 kref_get(&port->kref); 1127 list_add(&port->next, &mstb->ports); 1128 mutex_unlock(&mstb->mgr->lock); 1129 } 1130 1131 if (old_ddps != port->ddps) { 1132 if (port->ddps) { 1133 if (!port->input) 1134 drm_dp_send_enum_path_resources(mstb->mgr, mstb, port); 1135 } else { 1136 port->available_pbn = 0; 1137 } 1138 } 1139 1140 if (old_pdt != port->pdt && !port->input) { 1141 drm_dp_port_teardown_pdt(port, old_pdt); 1142 1143 ret = drm_dp_port_setup_pdt(port); 1144 if (ret == true) 1145 drm_dp_send_link_address(mstb->mgr, port->mstb); 1146 } 1147 1148 if (created && !port->input) { 1149 char proppath[255]; 1150 1151 build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath)); 1152 port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath); 1153 if (!port->connector) { 1154 /* remove it from the port list */ 1155 mutex_lock(&mstb->mgr->lock); 1156 list_del(&port->next); 1157 mutex_unlock(&mstb->mgr->lock); 1158 /* drop port list reference */ 1159 drm_dp_put_port(port); 1160 goto out; 1161 } 1162 if (port->port_num >= DP_MST_LOGICAL_PORT_0) { 1163 port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc); 1164 drm_mode_connector_set_tile_property(port->connector); 1165 } 1166 (*mstb->mgr->cbs->register_connector)(port->connector); 1167 } 1168 1169out: 1170 /* put reference to this port */ 1171 drm_dp_put_port(port); 1172} 1173 1174static void drm_dp_update_port(struct drm_dp_mst_branch *mstb, 1175 struct drm_dp_connection_status_notify *conn_stat) 1176{ 1177 struct drm_dp_mst_port *port; 1178 int old_pdt; 1179 int old_ddps; 1180 bool dowork = false; 1181 port = drm_dp_get_port(mstb, conn_stat->port_number); 1182 if (!port) 1183 return; 1184 1185 old_ddps = port->ddps; 1186 old_pdt = port->pdt; 1187 port->pdt = conn_stat->peer_device_type; 1188 port->mcs = conn_stat->message_capability_status; 1189 port->ldps = conn_stat->legacy_device_plug_status; 1190 port->ddps = conn_stat->displayport_device_plug_status; 1191 1192 if (old_ddps != port->ddps) { 1193 if (port->ddps) { 1194 dowork = 
true; 1195 } else { 1196 port->available_pbn = 0; 1197 } 1198 } 1199 if (old_pdt != port->pdt && !port->input) { 1200 drm_dp_port_teardown_pdt(port, old_pdt); 1201 1202 if (drm_dp_port_setup_pdt(port)) 1203 dowork = true; 1204 } 1205 1206 drm_dp_put_port(port); 1207 if (dowork) 1208 queue_work(system_long_wq, &mstb->mgr->work); 1209 1210} 1211 1212static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr, 1213 u8 lct, u8 *rad) 1214{ 1215 struct drm_dp_mst_branch *mstb; 1216 struct drm_dp_mst_port *port; 1217 int i; 1218 /* find the port by iterating down */ 1219 1220 mutex_lock(&mgr->lock); 1221 mstb = mgr->mst_primary; 1222 1223 for (i = 0; i < lct - 1; i++) { 1224 int shift = (i % 2) ? 0 : 4; 1225 int port_num = (rad[i / 2] >> shift) & 0xf; 1226 1227 list_for_each_entry(port, &mstb->ports, next) { 1228 if (port->port_num == port_num) { 1229 mstb = port->mstb; 1230 if (!mstb) { 1231 DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]); 1232 goto out; 1233 } 1234 1235 break; 1236 } 1237 } 1238 } 1239 kref_get(&mstb->kref); 1240out: 1241 mutex_unlock(&mgr->lock); 1242 return mstb; 1243} 1244 1245static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper( 1246 struct drm_dp_mst_branch *mstb, 1247 uint8_t *guid) 1248{ 1249 struct drm_dp_mst_branch *found_mstb; 1250 struct drm_dp_mst_port *port; 1251 1252 if (memcmp(mstb->guid, guid, 16) == 0) 1253 return mstb; 1254 1255 1256 list_for_each_entry(port, &mstb->ports, next) { 1257 if (!port->mstb) 1258 continue; 1259 1260 found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid); 1261 1262 if (found_mstb) 1263 return found_mstb; 1264 } 1265 1266 return NULL; 1267} 1268 1269static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid( 1270 struct drm_dp_mst_topology_mgr *mgr, 1271 uint8_t *guid) 1272{ 1273 struct drm_dp_mst_branch *mstb; 1274 1275 /* find the port by iterating down */ 1276 mutex_lock(&mgr->lock); 1277 1278 mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid); 1279 1280 if (mstb) 1281 kref_get(&mstb->kref); 1282 1283 mutex_unlock(&mgr->lock); 1284 return mstb; 1285} 1286 1287static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr, 1288 struct drm_dp_mst_branch *mstb) 1289{ 1290 struct drm_dp_mst_port *port; 1291 struct drm_dp_mst_branch *mstb_child; 1292 if (!mstb->link_address_sent) 1293 drm_dp_send_link_address(mgr, mstb); 1294 1295 list_for_each_entry(port, &mstb->ports, next) { 1296 if (port->input) 1297 continue; 1298 1299 if (!port->ddps) 1300 continue; 1301 1302 if (!port->available_pbn) 1303 drm_dp_send_enum_path_resources(mgr, mstb, port); 1304 1305 if (port->mstb) { 1306 mstb_child = drm_dp_get_validated_mstb_ref(mgr, port->mstb); 1307 if (mstb_child) { 1308 drm_dp_check_and_send_link_address(mgr, mstb_child); 1309 drm_dp_put_mst_branch_device(mstb_child); 1310 } 1311 } 1312 } 1313} 1314 1315static void drm_dp_mst_link_probe_work(struct work_struct *work) 1316{ 1317 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work); 1318 struct drm_dp_mst_branch *mstb; 1319 1320 mutex_lock(&mgr->lock); 1321 mstb = mgr->mst_primary; 1322 if (mstb) { 1323 kref_get(&mstb->kref); 1324 } 1325 mutex_unlock(&mgr->lock); 1326 if (mstb) { 1327 drm_dp_check_and_send_link_address(mgr, mstb); 1328 drm_dp_put_mst_branch_device(mstb); 1329 } 1330} 1331 1332static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, 1333 u8 *guid) 1334{ 1335 static u8 zero_guid[16]; 1336 
1337 if (!memcmp(guid, zero_guid, 16)) { 1338 u64 salt = get_jiffies_64(); 1339 memcpy(&guid[0], &salt, sizeof(u64)); 1340 memcpy(&guid[8], &salt, sizeof(u64)); 1341 return false; 1342 } 1343 return true; 1344} 1345 1346#if 0 1347static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes) 1348{ 1349 struct drm_dp_sideband_msg_req_body req; 1350 1351 req.req_type = DP_REMOTE_DPCD_READ; 1352 req.u.dpcd_read.port_number = port_num; 1353 req.u.dpcd_read.dpcd_address = offset; 1354 req.u.dpcd_read.num_bytes = num_bytes; 1355 drm_dp_encode_sideband_req(&req, msg); 1356 1357 return 0; 1358} 1359#endif 1360 1361static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr, 1362 bool up, u8 *msg, int len) 1363{ 1364 int ret; 1365 int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE; 1366 int tosend, total, offset; 1367 int retries = 0; 1368 1369retry: 1370 total = len; 1371 offset = 0; 1372 do { 1373 tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total); 1374 1375 ret = drm_dp_dpcd_write(mgr->aux, regbase + offset, 1376 &msg[offset], 1377 tosend); 1378 if (ret != tosend) { 1379 if (ret == -EIO && retries < 5) { 1380 retries++; 1381 goto retry; 1382 } 1383 DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret); 1384 1385 return -EIO; 1386 } 1387 offset += tosend; 1388 total -= tosend; 1389 } while (total > 0); 1390 return 0; 1391} 1392 1393static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr, 1394 struct drm_dp_sideband_msg_tx *txmsg) 1395{ 1396 struct drm_dp_mst_branch *mstb = txmsg->dst; 1397 u8 req_type; 1398 1399 /* both msg slots are full */ 1400 if (txmsg->seqno == -1) { 1401 if (mstb->tx_slots[0] && mstb->tx_slots[1]) { 1402 DRM_DEBUG_KMS("%s: failed to find slot\n", __func__); 1403 return -EAGAIN; 1404 } 1405 if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) { 1406 txmsg->seqno = mstb->last_seqno; 1407 mstb->last_seqno ^= 1; 1408 } else if (mstb->tx_slots[0] == NULL) 1409 txmsg->seqno = 0; 1410 else 1411 txmsg->seqno = 1; 1412 mstb->tx_slots[txmsg->seqno] = txmsg; 1413 } 1414 1415 req_type = txmsg->msg[0] & 0x7f; 1416 if (req_type == DP_CONNECTION_STATUS_NOTIFY || 1417 req_type == DP_RESOURCE_STATUS_NOTIFY) 1418 hdr->broadcast = 1; 1419 else 1420 hdr->broadcast = 0; 1421 hdr->path_msg = txmsg->path_msg; 1422 hdr->lct = mstb->lct; 1423 hdr->lcr = mstb->lct - 1; 1424 if (mstb->lct > 1) 1425 memcpy(hdr->rad, mstb->rad, mstb->lct / 2); 1426 hdr->seqno = txmsg->seqno; 1427 return 0; 1428} 1429/* 1430 * process a single block of the next message in the sideband queue 1431 */ 1432static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr, 1433 struct drm_dp_sideband_msg_tx *txmsg, 1434 bool up) 1435{ 1436 u8 chunk[48]; 1437 struct drm_dp_sideband_msg_hdr hdr; 1438 int len, space, idx, tosend; 1439 int ret; 1440 1441 memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr)); 1442 1443 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) { 1444 txmsg->seqno = -1; 1445 txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND; 1446 } 1447 1448 /* make hdr from dst mst - for replies use seqno 1449 otherwise assign one */ 1450 ret = set_hdr_from_dst_qlock(&hdr, txmsg); 1451 if (ret < 0) 1452 return ret; 1453 1454 /* amount left to send in this message */ 1455 len = txmsg->cur_len - txmsg->cur_offset; 1456 1457 /* 48 - sideband msg size - 1 byte for data CRC, x header bytes */ 1458 space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr); 1459 1460 tosend = min(len, space); 1461 if (len == txmsg->cur_len) 1462 
hdr.somt = 1; 1463 if (space >= len) 1464 hdr.eomt = 1; 1465 1466 1467 hdr.msg_len = tosend + 1; 1468 drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx); 1469 memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend); 1470 /* add crc at end */ 1471 drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend); 1472 idx += tosend + 1; 1473 1474 ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx); 1475 if (ret) { 1476 DRM_DEBUG_KMS("sideband msg failed to send\n"); 1477 return ret; 1478 } 1479 1480 txmsg->cur_offset += tosend; 1481 if (txmsg->cur_offset == txmsg->cur_len) { 1482 txmsg->state = DRM_DP_SIDEBAND_TX_SENT; 1483 return 1; 1484 } 1485 return 0; 1486} 1487 1488static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) 1489{ 1490 struct drm_dp_sideband_msg_tx *txmsg; 1491 int ret; 1492 1493 WARN_ON(!mutex_is_locked(&mgr->qlock)); 1494 1495 /* construct a chunk from the first msg in the tx_msg queue */ 1496 if (list_empty(&mgr->tx_msg_downq)) 1497 return; 1498 1499 txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next); 1500 ret = process_single_tx_qlock(mgr, txmsg, false); 1501 if (ret == 1) { 1502 /* txmsg is sent it should be in the slots now */ 1503 list_del(&txmsg->next); 1504 } else if (ret) { 1505 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret); 1506 list_del(&txmsg->next); 1507 if (txmsg->seqno != -1) 1508 txmsg->dst->tx_slots[txmsg->seqno] = NULL; 1509 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT; 1510 wake_up(&mgr->tx_waitq); 1511 } 1512} 1513 1514/* called holding qlock */ 1515static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr, 1516 struct drm_dp_sideband_msg_tx *txmsg) 1517{ 1518 int ret; 1519 1520 /* construct a chunk from the first msg in the tx_msg queue */ 1521 ret = process_single_tx_qlock(mgr, txmsg, true); 1522 1523 if (ret != 1) 1524 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret); 1525 1526 txmsg->dst->tx_slots[txmsg->seqno] = NULL; 1527} 1528 1529static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, 1530 struct drm_dp_sideband_msg_tx *txmsg) 1531{ 1532 mutex_lock(&mgr->qlock); 1533 list_add_tail(&txmsg->next, &mgr->tx_msg_downq); 1534 if (list_is_singular(&mgr->tx_msg_downq)) 1535 process_single_down_tx_qlock(mgr); 1536 mutex_unlock(&mgr->qlock); 1537} 1538 1539static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, 1540 struct drm_dp_mst_branch *mstb) 1541{ 1542 int len; 1543 struct drm_dp_sideband_msg_tx *txmsg; 1544 int ret; 1545 1546 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 1547 if (!txmsg) 1548 return; 1549 1550 txmsg->dst = mstb; 1551 len = build_link_address(txmsg); 1552 1553 mstb->link_address_sent = true; 1554 drm_dp_queue_down_tx(mgr, txmsg); 1555 1556 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 1557 if (ret > 0) { 1558 int i; 1559 1560 if (txmsg->reply.reply_type == 1) 1561 DRM_DEBUG_KMS("link address nak received\n"); 1562 else { 1563 DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports); 1564 for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) { 1565 DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i, 1566 txmsg->reply.u.link_addr.ports[i].input_port, 1567 txmsg->reply.u.link_addr.ports[i].peer_device_type, 1568 txmsg->reply.u.link_addr.ports[i].port_number, 1569 txmsg->reply.u.link_addr.ports[i].dpcd_revision, 1570 txmsg->reply.u.link_addr.ports[i].mcs, 1571 txmsg->reply.u.link_addr.ports[i].ddps, 1572 txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status, 1573 
txmsg->reply.u.link_addr.ports[i].num_sdp_streams, 1574 txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks); 1575 } 1576 1577 drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid); 1578 1579 for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) { 1580 drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]); 1581 } 1582 (*mgr->cbs->hotplug)(mgr); 1583 } 1584 } else { 1585 mstb->link_address_sent = false; 1586 DRM_DEBUG_KMS("link address failed %d\n", ret); 1587 } 1588 1589 kfree(txmsg); 1590} 1591 1592static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, 1593 struct drm_dp_mst_branch *mstb, 1594 struct drm_dp_mst_port *port) 1595{ 1596 int len; 1597 struct drm_dp_sideband_msg_tx *txmsg; 1598 int ret; 1599 1600 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 1601 if (!txmsg) 1602 return -ENOMEM; 1603 1604 txmsg->dst = mstb; 1605 len = build_enum_path_resources(txmsg, port->port_num); 1606 1607 drm_dp_queue_down_tx(mgr, txmsg); 1608 1609 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 1610 if (ret > 0) { 1611 if (txmsg->reply.reply_type == 1) 1612 DRM_DEBUG_KMS("enum path resources nak received\n"); 1613 else { 1614 if (port->port_num != txmsg->reply.u.path_resources.port_number) 1615 DRM_ERROR("got incorrect port in response\n"); 1616 DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number, 1617 txmsg->reply.u.path_resources.avail_payload_bw_number); 1618 port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number; 1619 } 1620 } 1621 1622 kfree(txmsg); 1623 return 0; 1624} 1625 1626static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb) 1627{ 1628 if (!mstb->port_parent) 1629 return NULL; 1630 1631 if (mstb->port_parent->mstb != mstb) 1632 return mstb->port_parent; 1633 1634 return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent); 1635} 1636 1637static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr, 1638 struct drm_dp_mst_branch *mstb, 1639 int *port_num) 1640{ 1641 struct drm_dp_mst_branch *rmstb = NULL; 1642 struct drm_dp_mst_port *found_port; 1643 mutex_lock(&mgr->lock); 1644 if (mgr->mst_primary) { 1645 found_port = drm_dp_get_last_connected_port_to_mstb(mstb); 1646 1647 if (found_port) { 1648 rmstb = found_port->parent; 1649 kref_get(&rmstb->kref); 1650 *port_num = found_port->port_num; 1651 } 1652 } 1653 mutex_unlock(&mgr->lock); 1654 return rmstb; 1655} 1656 1657static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, 1658 struct drm_dp_mst_port *port, 1659 int id, 1660 int pbn) 1661{ 1662 struct drm_dp_sideband_msg_tx *txmsg; 1663 struct drm_dp_mst_branch *mstb; 1664 int len, ret, port_num; 1665 u8 sinks[DRM_DP_MAX_SDP_STREAMS]; 1666 int i; 1667 1668 port = drm_dp_get_validated_port_ref(mgr, port); 1669 if (!port) 1670 return -EINVAL; 1671 1672 port_num = port->port_num; 1673 mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); 1674 if (!mstb) { 1675 mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num); 1676 1677 if (!mstb) { 1678 drm_dp_put_port(port); 1679 return -EINVAL; 1680 } 1681 } 1682 1683 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 1684 if (!txmsg) { 1685 ret = -ENOMEM; 1686 goto fail_put; 1687 } 1688 1689 for (i = 0; i < port->num_sdp_streams; i++) 1690 sinks[i] = i; 1691 1692 txmsg->dst = mstb; 1693 len = build_allocate_payload(txmsg, port_num, 1694 id, 1695 pbn, 
port->num_sdp_streams, sinks); 1696 1697 drm_dp_queue_down_tx(mgr, txmsg); 1698 1699 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 1700 if (ret > 0) { 1701 if (txmsg->reply.reply_type == 1) { 1702 ret = -EINVAL; 1703 } else 1704 ret = 0; 1705 } 1706 kfree(txmsg); 1707fail_put: 1708 drm_dp_put_mst_branch_device(mstb); 1709 drm_dp_put_port(port); 1710 return ret; 1711} 1712 1713static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr, 1714 int id, 1715 struct drm_dp_payload *payload) 1716{ 1717 int ret; 1718 1719 ret = drm_dp_dpcd_write_payload(mgr, id, payload); 1720 if (ret < 0) { 1721 payload->payload_state = 0; 1722 return ret; 1723 } 1724 payload->payload_state = DP_PAYLOAD_LOCAL; 1725 return 0; 1726} 1727 1728static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr, 1729 struct drm_dp_mst_port *port, 1730 int id, 1731 struct drm_dp_payload *payload) 1732{ 1733 int ret; 1734 ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn); 1735 if (ret < 0) 1736 return ret; 1737 payload->payload_state = DP_PAYLOAD_REMOTE; 1738 return ret; 1739} 1740 1741static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr, 1742 struct drm_dp_mst_port *port, 1743 int id, 1744 struct drm_dp_payload *payload) 1745{ 1746 DRM_DEBUG_KMS("\n"); 1747 /* its okay for these to fail */ 1748 if (port) { 1749 drm_dp_payload_send_msg(mgr, port, id, 0); 1750 } 1751 1752 drm_dp_dpcd_write_payload(mgr, id, payload); 1753 payload->payload_state = DP_PAYLOAD_DELETE_LOCAL; 1754 return 0; 1755} 1756 1757static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr, 1758 int id, 1759 struct drm_dp_payload *payload) 1760{ 1761 payload->payload_state = 0; 1762 return 0; 1763} 1764 1765/** 1766 * drm_dp_update_payload_part1() - Execute payload update part 1 1767 * @mgr: manager to use. 1768 * 1769 * This iterates over all proposed virtual channels, and tries to 1770 * allocate space in the link for them. For 0->slots transitions, 1771 * this step just writes the VCPI to the MST device. For slots->0 1772 * transitions, this writes the updated VCPIs and removes the 1773 * remote VC payloads. 1774 * 1775 * after calling this the driver should generate ACT and payload 1776 * packets. 
1777 */ 1778int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) 1779{ 1780 int i, j; 1781 int cur_slots = 1; 1782 struct drm_dp_payload req_payload; 1783 struct drm_dp_mst_port *port; 1784 1785 mutex_lock(&mgr->payload_lock); 1786 for (i = 0; i < mgr->max_payloads; i++) { 1787 /* solve the current payloads - compare to the hw ones 1788 - update the hw view */ 1789 req_payload.start_slot = cur_slots; 1790 if (mgr->proposed_vcpis[i]) { 1791 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); 1792 port = drm_dp_get_validated_port_ref(mgr, port); 1793 if (!port) { 1794 mutex_unlock(&mgr->payload_lock); 1795 return -EINVAL; 1796 } 1797 req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots; 1798 req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi; 1799 } else { 1800 port = NULL; 1801 req_payload.num_slots = 0; 1802 } 1803 1804 if (mgr->payloads[i].start_slot != req_payload.start_slot) { 1805 mgr->payloads[i].start_slot = req_payload.start_slot; 1806 } 1807 /* work out what is required to happen with this payload */ 1808 if (mgr->payloads[i].num_slots != req_payload.num_slots) { 1809 1810 /* need to push an update for this payload */ 1811 if (req_payload.num_slots) { 1812 drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload); 1813 mgr->payloads[i].num_slots = req_payload.num_slots; 1814 mgr->payloads[i].vcpi = req_payload.vcpi; 1815 } else if (mgr->payloads[i].num_slots) { 1816 mgr->payloads[i].num_slots = 0; 1817 drm_dp_destroy_payload_step1(mgr, port, port->vcpi.vcpi, &mgr->payloads[i]); 1818 req_payload.payload_state = mgr->payloads[i].payload_state; 1819 mgr->payloads[i].start_slot = 0; 1820 } 1821 mgr->payloads[i].payload_state = req_payload.payload_state; 1822 } 1823 cur_slots += req_payload.num_slots; 1824 1825 if (port) 1826 drm_dp_put_port(port); 1827 } 1828 1829 for (i = 0; i < mgr->max_payloads; i++) { 1830 if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) { 1831 DRM_DEBUG_KMS("removing payload %d\n", i); 1832 for (j = i; j < mgr->max_payloads - 1; j++) { 1833 memcpy(&mgr->payloads[j], &mgr->payloads[j + 1], sizeof(struct drm_dp_payload)); 1834 mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1]; 1835 if (mgr->proposed_vcpis[j] && mgr->proposed_vcpis[j]->num_slots) { 1836 set_bit(j + 1, &mgr->payload_mask); 1837 } else { 1838 clear_bit(j + 1, &mgr->payload_mask); 1839 } 1840 } 1841 memset(&mgr->payloads[mgr->max_payloads - 1], 0, sizeof(struct drm_dp_payload)); 1842 mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL; 1843 clear_bit(mgr->max_payloads, &mgr->payload_mask); 1844 1845 } 1846 } 1847 mutex_unlock(&mgr->payload_lock); 1848 1849 return 0; 1850} 1851EXPORT_SYMBOL(drm_dp_update_payload_part1); 1852 1853/** 1854 * drm_dp_update_payload_part2() - Execute payload update part 2 1855 * @mgr: manager to use. 1856 * 1857 * This iterates over all proposed virtual channels, and tries to 1858 * allocate space in the link for them. For 0->slots transitions, 1859 * this step writes the remote VC payload commands. For slots->0 1860 * this just resets some internal state. 
1861 */ 1862int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr) 1863{ 1864 struct drm_dp_mst_port *port; 1865 int i; 1866 int ret = 0; 1867 mutex_lock(&mgr->payload_lock); 1868 for (i = 0; i < mgr->max_payloads; i++) { 1869 1870 if (!mgr->proposed_vcpis[i]) 1871 continue; 1872 1873 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); 1874 1875 DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state); 1876 if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) { 1877 ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]); 1878 } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) { 1879 ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]); 1880 } 1881 if (ret) { 1882 mutex_unlock(&mgr->payload_lock); 1883 return ret; 1884 } 1885 } 1886 mutex_unlock(&mgr->payload_lock); 1887 return 0; 1888} 1889EXPORT_SYMBOL(drm_dp_update_payload_part2); 1890 1891#if 0 /* unused as of yet */ 1892static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr, 1893 struct drm_dp_mst_port *port, 1894 int offset, int size) 1895{ 1896 int len; 1897 struct drm_dp_sideband_msg_tx *txmsg; 1898 1899 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 1900 if (!txmsg) 1901 return -ENOMEM; 1902 1903 len = build_dpcd_read(txmsg, port->port_num, 0, 8); 1904 txmsg->dst = port->parent; 1905 1906 drm_dp_queue_down_tx(mgr, txmsg); 1907 1908 return 0; 1909} 1910#endif 1911 1912static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, 1913 struct drm_dp_mst_port *port, 1914 int offset, int size, u8 *bytes) 1915{ 1916 int len; 1917 int ret; 1918 struct drm_dp_sideband_msg_tx *txmsg; 1919 struct drm_dp_mst_branch *mstb; 1920 1921 mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); 1922 if (!mstb) 1923 return -EINVAL; 1924 1925 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 1926 if (!txmsg) { 1927 ret = -ENOMEM; 1928 goto fail_put; 1929 } 1930 1931 len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes); 1932 txmsg->dst = mstb; 1933 1934 drm_dp_queue_down_tx(mgr, txmsg); 1935 1936 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 1937 if (ret > 0) { 1938 if (txmsg->reply.reply_type == 1) { 1939 ret = -EINVAL; 1940 } else 1941 ret = 0; 1942 } 1943 kfree(txmsg); 1944fail_put: 1945 drm_dp_put_mst_branch_device(mstb); 1946 return ret; 1947} 1948 1949static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type) 1950{ 1951 struct drm_dp_sideband_msg_reply_body reply; 1952 1953 reply.reply_type = 0; 1954 reply.req_type = req_type; 1955 drm_dp_encode_sideband_reply(&reply, msg); 1956 return 0; 1957} 1958 1959static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr, 1960 struct drm_dp_mst_branch *mstb, 1961 int req_type, int seqno, bool broadcast) 1962{ 1963 struct drm_dp_sideband_msg_tx *txmsg; 1964 1965 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 1966 if (!txmsg) 1967 return -ENOMEM; 1968 1969 txmsg->dst = mstb; 1970 txmsg->seqno = seqno; 1971 drm_dp_encode_up_ack_reply(txmsg, req_type); 1972 1973 mutex_lock(&mgr->qlock); 1974 1975 process_single_up_tx_qlock(mgr, txmsg); 1976 1977 mutex_unlock(&mgr->qlock); 1978 1979 kfree(txmsg); 1980 return 0; 1981} 1982 1983static bool drm_dp_get_vc_payload_bw(int dp_link_bw, 1984 int dp_link_count, 1985 int *out) 1986{ 1987 switch (dp_link_bw) { 1988 default: 1989 DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n", 1990 dp_link_bw, dp_link_count); 1991 return false; 1992 1993 case 
DP_LINK_BW_1_62: 1994 *out = 3 * dp_link_count; 1995 break; 1996 case DP_LINK_BW_2_7: 1997 *out = 5 * dp_link_count; 1998 break; 1999 case DP_LINK_BW_5_4: 2000 *out = 10 * dp_link_count; 2001 break; 2002 } 2003 return true; 2004} 2005 2006/** 2007 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager 2008 * @mgr: manager to set state for 2009 * @mst_state: true to enable MST on this connector - false to disable. 2010 * 2011 * This is called by the driver when it detects an MST capable device plugged 2012 * into a DP MST capable port, or when a DP MST capable device is unplugged. 2013 */ 2014int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state) 2015{ 2016 int ret = 0; 2017 struct drm_dp_mst_branch *mstb = NULL; 2018 2019 mutex_lock(&mgr->lock); 2020 if (mst_state == mgr->mst_state) 2021 goto out_unlock; 2022 2023 mgr->mst_state = mst_state; 2024 /* set the device into MST mode */ 2025 if (mst_state) { 2026 WARN_ON(mgr->mst_primary); 2027 2028 /* get dpcd info */ 2029 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE); 2030 if (ret != DP_RECEIVER_CAP_SIZE) { 2031 DRM_DEBUG_KMS("failed to read DPCD\n"); 2032 goto out_unlock; 2033 } 2034 2035 if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1], 2036 mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK, 2037 &mgr->pbn_div)) { 2038 ret = -EINVAL; 2039 goto out_unlock; 2040 } 2041 2042 mgr->total_pbn = 2560; 2043 mgr->total_slots = DIV_ROUND_UP(mgr->total_pbn, mgr->pbn_div); 2044 mgr->avail_slots = mgr->total_slots; 2045 2046 /* add initial branch device at LCT 1 */ 2047 mstb = drm_dp_add_mst_branch_device(1, NULL); 2048 if (mstb == NULL) { 2049 ret = -ENOMEM; 2050 goto out_unlock; 2051 } 2052 mstb->mgr = mgr; 2053 2054 /* give this the main reference */ 2055 mgr->mst_primary = mstb; 2056 kref_get(&mgr->mst_primary->kref); 2057 2058 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 2059 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC); 2060 if (ret < 0) { 2061 goto out_unlock; 2062 } 2063 2064 { 2065 struct drm_dp_payload reset_pay; 2066 reset_pay.start_slot = 0; 2067 reset_pay.num_slots = 0x3f; 2068 drm_dp_dpcd_write_payload(mgr, 0, &reset_pay); 2069 } 2070 2071 queue_work(system_long_wq, &mgr->work); 2072 2073 ret = 0; 2074 } else { 2075 /* disable MST on the device */ 2076 mstb = mgr->mst_primary; 2077 mgr->mst_primary = NULL; 2078 /* this can fail if the device is gone */ 2079 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0); 2080 ret = 0; 2081 memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload)); 2082 mgr->payload_mask = 0; 2083 set_bit(0, &mgr->payload_mask); 2084 mgr->vcpi_mask = 0; 2085 } 2086 2087out_unlock: 2088 mutex_unlock(&mgr->lock); 2089 if (mstb) 2090 drm_dp_put_mst_branch_device(mstb); 2091 return ret; 2092 2093} 2094EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst); 2095 2096/** 2097 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager 2098 * @mgr: manager to suspend 2099 * 2100 * This function tells the MST device that we can't handle UP messages 2101 * anymore. This should stop it from sending any since we are suspended. 
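 *
 * Editorial usage note (not part of the original kernel-doc): drivers
 * normally call this from their system suspend path and pair it with
 * drm_dp_mst_topology_mgr_resume() on wakeup, e.g. with a hypothetical
 * driver-private "my_dp" structure embedding the topology manager:
 *
 *	drm_dp_mst_topology_mgr_suspend(&my_dp->mst_mgr);
 *	...
 *	if (drm_dp_mst_topology_mgr_resume(&my_dp->mst_mgr) < 0)
 *		my_dp_full_mst_reprobe(my_dp);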
2102 */ 2103void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr) 2104{ 2105 mutex_lock(&mgr->lock); 2106 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 2107 DP_MST_EN | DP_UPSTREAM_IS_SRC); 2108 mutex_unlock(&mgr->lock); 2109 flush_work(&mgr->work); 2110 flush_work(&mgr->destroy_connector_work); 2111} 2112EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend); 2113 2114/** 2115 * drm_dp_mst_topology_mgr_resume() - resume the MST manager 2116 * @mgr: manager to resume 2117 * 2118 * This will fetch DPCD and see if the device is still there, 2119 * if it is, it will rewrite the MSTM control bits, and return. 2120 * 2121 * if the device fails this returns -1, and the driver should do 2122 * a full MST reprobe, in case we were undocked. 2123 */ 2124int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr) 2125{ 2126 int ret = 0; 2127 2128 mutex_lock(&mgr->lock); 2129 2130 if (mgr->mst_primary) { 2131 int sret; 2132 u8 guid[16]; 2133 2134 sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE); 2135 if (sret != DP_RECEIVER_CAP_SIZE) { 2136 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); 2137 ret = -1; 2138 goto out_unlock; 2139 } 2140 2141 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 2142 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC); 2143 if (ret < 0) { 2144 DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n"); 2145 ret = -1; 2146 goto out_unlock; 2147 } 2148 2149 /* Some hubs forget their guids after they resume */ 2150 sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16); 2151 if (sret != 16) { 2152 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); 2153 ret = -1; 2154 goto out_unlock; 2155 } 2156 drm_dp_check_mstb_guid(mgr->mst_primary, guid); 2157 2158 ret = 0; 2159 } else 2160 ret = -1; 2161 2162out_unlock: 2163 mutex_unlock(&mgr->lock); 2164 return ret; 2165} 2166EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume); 2167 2168static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up) 2169{ 2170 int len; 2171 u8 replyblock[32]; 2172 int replylen, origlen, curreply; 2173 int ret; 2174 struct drm_dp_sideband_msg_rx *msg; 2175 int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE; 2176 msg = up ? 
&mgr->up_req_recv : &mgr->down_rep_recv; 2177 2178 len = min(mgr->max_dpcd_transaction_bytes, 16); 2179 ret = drm_dp_dpcd_read(mgr->aux, basereg, 2180 replyblock, len); 2181 if (ret != len) { 2182 DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret); 2183 return; 2184 } 2185 ret = drm_dp_sideband_msg_build(msg, replyblock, len, true); 2186 if (!ret) { 2187 DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]); 2188 return; 2189 } 2190 replylen = msg->curchunk_len + msg->curchunk_hdrlen; 2191 2192 origlen = replylen; 2193 replylen -= len; 2194 curreply = len; 2195 while (replylen > 0) { 2196 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16); 2197 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply, 2198 replyblock, len); 2199 if (ret != len) { 2200 DRM_DEBUG_KMS("failed to read a chunk\n"); 2201 } 2202 ret = drm_dp_sideband_msg_build(msg, replyblock, len, false); 2203 if (ret == false) 2204 DRM_DEBUG_KMS("failed to build sideband msg\n"); 2205 curreply += len; 2206 replylen -= len; 2207 } 2208} 2209 2210static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) 2211{ 2212 int ret = 0; 2213 2214 drm_dp_get_one_sb_msg(mgr, false); 2215 2216 if (mgr->down_rep_recv.have_eomt) { 2217 struct drm_dp_sideband_msg_tx *txmsg; 2218 struct drm_dp_mst_branch *mstb; 2219 int slot = -1; 2220 mstb = drm_dp_get_mst_branch_device(mgr, 2221 mgr->down_rep_recv.initial_hdr.lct, 2222 mgr->down_rep_recv.initial_hdr.rad); 2223 2224 if (!mstb) { 2225 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct); 2226 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); 2227 return 0; 2228 } 2229 2230 /* find the message */ 2231 slot = mgr->down_rep_recv.initial_hdr.seqno; 2232 mutex_lock(&mgr->qlock); 2233 txmsg = mstb->tx_slots[slot]; 2234 /* remove from slots */ 2235 mutex_unlock(&mgr->qlock); 2236 2237 if (!txmsg) { 2238 DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n", 2239 mstb, 2240 mgr->down_rep_recv.initial_hdr.seqno, 2241 mgr->down_rep_recv.initial_hdr.lct, 2242 mgr->down_rep_recv.initial_hdr.rad[0], 2243 mgr->down_rep_recv.msg[0]); 2244 drm_dp_put_mst_branch_device(mstb); 2245 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); 2246 return 0; 2247 } 2248 2249 drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply); 2250 if (txmsg->reply.reply_type == 1) { 2251 DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason, txmsg->reply.u.nak.nak_data); 2252 } 2253 2254 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); 2255 drm_dp_put_mst_branch_device(mstb); 2256 2257 mutex_lock(&mgr->qlock); 2258 txmsg->state = DRM_DP_SIDEBAND_TX_RX; 2259 mstb->tx_slots[slot] = NULL; 2260 mutex_unlock(&mgr->qlock); 2261 2262 wake_up(&mgr->tx_waitq); 2263 } 2264 return ret; 2265} 2266 2267static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) 2268{ 2269 int ret = 0; 2270 drm_dp_get_one_sb_msg(mgr, true); 2271 2272 if (mgr->up_req_recv.have_eomt) { 2273 struct drm_dp_sideband_msg_req_body msg; 2274 struct drm_dp_mst_branch *mstb = NULL; 2275 bool seqno; 2276 2277 if (!mgr->up_req_recv.initial_hdr.broadcast) { 2278 mstb = drm_dp_get_mst_branch_device(mgr, 2279 mgr->up_req_recv.initial_hdr.lct, 2280 mgr->up_req_recv.initial_hdr.rad); 2281 if (!mstb) { 2282 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct); 2283 memset(&mgr->up_req_recv, 0, sizeof(struct 
drm_dp_sideband_msg_rx)); 2284 return 0; 2285 } 2286 } 2287 2288 seqno = mgr->up_req_recv.initial_hdr.seqno; 2289 drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg); 2290 2291 if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { 2292 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false); 2293 2294 if (!mstb) 2295 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid); 2296 2297 if (!mstb) { 2298 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct); 2299 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); 2300 return 0; 2301 } 2302 2303 drm_dp_update_port(mstb, &msg.u.conn_stat); 2304 2305 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type); 2306 (*mgr->cbs->hotplug)(mgr); 2307 2308 } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { 2309 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false); 2310 if (!mstb) 2311 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid); 2312 2313 if (!mstb) { 2314 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct); 2315 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); 2316 return 0; 2317 } 2318 2319 DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn); 2320 } 2321 2322 drm_dp_put_mst_branch_device(mstb); 2323 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); 2324 } 2325 return ret; 2326} 2327 2328/** 2329 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify 2330 * @mgr: manager to notify irq for. 2331 * @esi: 4 bytes from SINK_COUNT_ESI 2332 * @handled: whether the hpd interrupt was consumed or not 2333 * 2334 * This should be called from the driver when it detects a short IRQ, 2335 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The 2336 * topology manager will process the sideband messages received as a result 2337 * of this. 2338 */ 2339int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled) 2340{ 2341 int ret = 0; 2342 int sc; 2343 *handled = false; 2344 sc = esi[0] & 0x3f; 2345 2346 if (sc != mgr->sink_count) { 2347 mgr->sink_count = sc; 2348 *handled = true; 2349 } 2350 2351 if (esi[1] & DP_DOWN_REP_MSG_RDY) { 2352 ret = drm_dp_mst_handle_down_rep(mgr); 2353 *handled = true; 2354 } 2355 2356 if (esi[1] & DP_UP_REQ_MSG_RDY) { 2357 ret |= drm_dp_mst_handle_up_req(mgr); 2358 *handled = true; 2359 } 2360 2361 drm_dp_mst_kick_tx(mgr); 2362 return ret; 2363} 2364EXPORT_SYMBOL(drm_dp_mst_hpd_irq); 2365 2366/** 2367 * drm_dp_mst_detect_port() - get connection status for an MST port 2368 * @connector: DRM connector for this port 2369 * @mgr: manager for this port 2370 * @port: unverified pointer to a port 2371 * 2372 * This returns the current connection state for a port. 
It validates the 2373 * port pointer still exists so the caller doesn't require a reference 2374 */ 2375enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector, 2376 struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) 2377{ 2378 enum drm_connector_status status = connector_status_disconnected; 2379 2380 /* we need to search for the port in the mgr in case its gone */ 2381 port = drm_dp_get_validated_port_ref(mgr, port); 2382 if (!port) 2383 return connector_status_disconnected; 2384 2385 if (!port->ddps) 2386 goto out; 2387 2388 switch (port->pdt) { 2389 case DP_PEER_DEVICE_NONE: 2390 case DP_PEER_DEVICE_MST_BRANCHING: 2391 break; 2392 2393 case DP_PEER_DEVICE_SST_SINK: 2394 status = connector_status_connected; 2395 /* for logical ports - cache the EDID */ 2396 if (port->port_num >= 8 && !port->cached_edid) { 2397 port->cached_edid = drm_get_edid(connector, &port->aux.ddc); 2398 } 2399 break; 2400 case DP_PEER_DEVICE_DP_LEGACY_CONV: 2401 if (port->ldps) 2402 status = connector_status_connected; 2403 break; 2404 } 2405out: 2406 drm_dp_put_port(port); 2407 return status; 2408} 2409EXPORT_SYMBOL(drm_dp_mst_detect_port); 2410 2411/** 2412 * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not 2413 * @mgr: manager for this port 2414 * @port: unverified pointer to a port. 2415 * 2416 * This returns whether the port supports audio or not. 2417 */ 2418bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr, 2419 struct drm_dp_mst_port *port) 2420{ 2421 bool ret = false; 2422 2423 port = drm_dp_get_validated_port_ref(mgr, port); 2424 if (!port) 2425 return ret; 2426 ret = port->has_audio; 2427 drm_dp_put_port(port); 2428 return ret; 2429} 2430EXPORT_SYMBOL(drm_dp_mst_port_has_audio); 2431 2432/** 2433 * drm_dp_mst_get_edid() - get EDID for an MST port 2434 * @connector: toplevel connector to get EDID for 2435 * @mgr: manager for this port 2436 * @port: unverified pointer to a port. 2437 * 2438 * This returns an EDID for the port connected to a connector, 2439 * It validates the pointer still exists so the caller doesn't require a 2440 * reference. 2441 */ 2442struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) 2443{ 2444 struct edid *edid = NULL; 2445 2446 /* we need to search for the port in the mgr in case its gone */ 2447 port = drm_dp_get_validated_port_ref(mgr, port); 2448 if (!port) 2449 return NULL; 2450 2451 if (port->cached_edid) 2452 edid = drm_edid_duplicate(port->cached_edid); 2453 else { 2454 edid = drm_get_edid(connector, &port->aux.ddc); 2455 drm_mode_connector_set_tile_property(connector); 2456 } 2457 port->has_audio = drm_detect_monitor_audio(edid); 2458 drm_dp_put_port(port); 2459 return edid; 2460} 2461EXPORT_SYMBOL(drm_dp_mst_get_edid); 2462 2463/** 2464 * drm_dp_find_vcpi_slots() - find slots for this PBN value 2465 * @mgr: manager to use 2466 * @pbn: payload bandwidth to convert into slots. 
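 *
 * Illustrative example (editorial addition, not in the original comment):
 * on a DP_LINK_BW_5_4, 4-lane link, drm_dp_get_vc_payload_bw() sets
 * pbn_div to 40, so a 689 PBN stream needs DIV_ROUND_UP(689, 40) = 18
 * time slots; -ENOSPC is returned when that exceeds the remaining
 * avail_slots.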
2467 */ 2468int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, 2469 int pbn) 2470{ 2471 int num_slots; 2472 2473 num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div); 2474 2475 if (num_slots > mgr->avail_slots) 2476 return -ENOSPC; 2477 return num_slots; 2478} 2479EXPORT_SYMBOL(drm_dp_find_vcpi_slots); 2480 2481static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr, 2482 struct drm_dp_vcpi *vcpi, int pbn) 2483{ 2484 int num_slots; 2485 int ret; 2486 2487 num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div); 2488 2489 if (num_slots > mgr->avail_slots) 2490 return -ENOSPC; 2491 2492 vcpi->pbn = pbn; 2493 vcpi->aligned_pbn = num_slots * mgr->pbn_div; 2494 vcpi->num_slots = num_slots; 2495 2496 ret = drm_dp_mst_assign_payload_id(mgr, vcpi); 2497 if (ret < 0) 2498 return ret; 2499 return 0; 2500} 2501 2502/** 2503 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel 2504 * @mgr: manager for this port 2505 * @port: port to allocate a virtual channel for. 2506 * @pbn: payload bandwidth number to request 2507 * @slots: returned number of slots for this PBN. 2508 */ 2509bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots) 2510{ 2511 int ret; 2512 2513 port = drm_dp_get_validated_port_ref(mgr, port); 2514 if (!port) 2515 return false; 2516 2517 if (port->vcpi.vcpi > 0) { 2518 DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn); 2519 if (pbn == port->vcpi.pbn) { 2520 *slots = port->vcpi.num_slots; 2521 drm_dp_put_port(port); 2522 return true; 2523 } 2524 } 2525 2526 ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn); 2527 if (ret) { 2528 DRM_DEBUG_KMS("failed to init vcpi %d %d %d\n", DIV_ROUND_UP(pbn, mgr->pbn_div), mgr->avail_slots, ret); 2529 goto out; 2530 } 2531 DRM_DEBUG_KMS("initing vcpi for %d %d\n", pbn, port->vcpi.num_slots); 2532 *slots = port->vcpi.num_slots; 2533 2534 drm_dp_put_port(port); 2535 return true; 2536out: 2537 return false; 2538} 2539EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi); 2540 2541int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) 2542{ 2543 int slots = 0; 2544 port = drm_dp_get_validated_port_ref(mgr, port); 2545 if (!port) 2546 return slots; 2547 2548 slots = port->vcpi.num_slots; 2549 drm_dp_put_port(port); 2550 return slots; 2551} 2552EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots); 2553 2554/** 2555 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI 2556 * @mgr: manager for this port 2557 * @port: unverified pointer to a port. 2558 * 2559 * This just resets the number of slots for the ports VCPI for later programming. 
2560 */ 2561void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) 2562{ 2563 port = drm_dp_get_validated_port_ref(mgr, port); 2564 if (!port) 2565 return; 2566 port->vcpi.num_slots = 0; 2567 drm_dp_put_port(port); 2568} 2569EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots); 2570 2571/** 2572 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI 2573 * @mgr: manager for this port 2574 * @port: unverified port to deallocate vcpi for 2575 */ 2576void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) 2577{ 2578 port = drm_dp_get_validated_port_ref(mgr, port); 2579 if (!port) 2580 return; 2581 2582 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); 2583 port->vcpi.num_slots = 0; 2584 port->vcpi.pbn = 0; 2585 port->vcpi.aligned_pbn = 0; 2586 port->vcpi.vcpi = 0; 2587 drm_dp_put_port(port); 2588} 2589EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi); 2590 2591static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr, 2592 int id, struct drm_dp_payload *payload) 2593{ 2594 u8 payload_alloc[3], status; 2595 int ret; 2596 int retries = 0; 2597 2598 drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, 2599 DP_PAYLOAD_TABLE_UPDATED); 2600 2601 payload_alloc[0] = id; 2602 payload_alloc[1] = payload->start_slot; 2603 payload_alloc[2] = payload->num_slots; 2604 2605 ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3); 2606 if (ret != 3) { 2607 DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret); 2608 goto fail; 2609 } 2610 2611retry: 2612 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); 2613 if (ret < 0) { 2614 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret); 2615 goto fail; 2616 } 2617 2618 if (!(status & DP_PAYLOAD_TABLE_UPDATED)) { 2619 retries++; 2620 if (retries < 20) { 2621 usleep_range(10000, 20000); 2622 goto retry; 2623 } 2624 DRM_DEBUG_KMS("status not set after read payload table status %d\n", status); 2625 ret = -EINVAL; 2626 goto fail; 2627 } 2628 ret = 0; 2629fail: 2630 return ret; 2631} 2632 2633 2634/** 2635 * drm_dp_check_act_status() - Check ACT handled status. 2636 * @mgr: manager to use 2637 * 2638 * Check the payload status bits in the DPCD for ACT handled completion. 2639 */ 2640int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr) 2641{ 2642 u8 status; 2643 int ret; 2644 int count = 0; 2645 2646 do { 2647 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); 2648 2649 if (ret < 0) { 2650 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret); 2651 goto fail; 2652 } 2653 2654 if (status & DP_PAYLOAD_ACT_HANDLED) 2655 break; 2656 count++; 2657 udelay(100); 2658 2659 } while (count < 30); 2660 2661 if (!(status & DP_PAYLOAD_ACT_HANDLED)) { 2662 DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count); 2663 ret = -EINVAL; 2664 goto fail; 2665 } 2666 return 0; 2667fail: 2668 return ret; 2669} 2670EXPORT_SYMBOL(drm_dp_check_act_status); 2671 2672/** 2673 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode. 2674 * @clock: dot clock for the mode 2675 * @bpp: bpp for the mode. 2676 * 2677 * This uses the formula in the spec to calculate the PBN value for a mode. 
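 *
 * Worked example (editorial addition, matching test_calc_pbn_mode() below):
 * a 154000 kHz, 30 bpp mode yields
 * ceil(154000 * 30 * 64 * 1006 / (54 * 8 * 1000 * 1000)) = 689 PBN.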
2678 */ 2679int drm_dp_calc_pbn_mode(int clock, int bpp) 2680{ 2681 u64 kbps; 2682 s64 peak_kbps; 2683 u32 numerator; 2684 u32 denominator; 2685 2686 kbps = clock * bpp; 2687 2688 /* 2689 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006 2690 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on 2691 * common multiplier to render an integer PBN for all link rate/lane 2692 * counts combinations 2693 * calculate 2694 * peak_kbps *= (1006/1000) 2695 * peak_kbps *= (64/54) 2696 * peak_kbps *= 8 convert to bytes 2697 */ 2698 2699 numerator = 64 * 1006; 2700 denominator = 54 * 8 * 1000 * 1000; 2701 2702 kbps *= numerator; 2703 peak_kbps = drm_fixp_from_fraction(kbps, denominator); 2704 2705 return drm_fixp2int_ceil(peak_kbps); 2706} 2707EXPORT_SYMBOL(drm_dp_calc_pbn_mode); 2708 2709static int test_calc_pbn_mode(void) 2710{ 2711 int ret; 2712 ret = drm_dp_calc_pbn_mode(154000, 30); 2713 if (ret != 689) { 2714 DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n", 2715 154000, 30, 689, ret); 2716 return -EINVAL; 2717 } 2718 ret = drm_dp_calc_pbn_mode(234000, 30); 2719 if (ret != 1047) { 2720 DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n", 2721 234000, 30, 1047, ret); 2722 return -EINVAL; 2723 } 2724 ret = drm_dp_calc_pbn_mode(297000, 24); 2725 if (ret != 1063) { 2726 DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n", 2727 297000, 24, 1063, ret); 2728 return -EINVAL; 2729 } 2730 return 0; 2731} 2732 2733/* we want to kick the TX after we've ack the up/down IRQs. */ 2734static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr) 2735{ 2736 queue_work(system_long_wq, &mgr->tx_work); 2737} 2738 2739static void drm_dp_mst_dump_mstb(struct seq_file *m, 2740 struct drm_dp_mst_branch *mstb) 2741{ 2742 struct drm_dp_mst_port *port; 2743 int tabs = mstb->lct; 2744 char prefix[10]; 2745 int i; 2746 2747 for (i = 0; i < tabs; i++) 2748 prefix[i] = '\t'; 2749 prefix[i] = '\0'; 2750 2751 seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports); 2752 list_for_each_entry(port, &mstb->ports, next) { 2753 seq_printf(m, "%sport: %d: input: %d: pdt: %d, ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n", prefix, port->port_num, port->input, port->pdt, port->ddps, port->ldps, port->num_sdp_streams, port->num_sdp_stream_sinks, port, port->connector); 2754 if (port->mstb) 2755 drm_dp_mst_dump_mstb(m, port->mstb); 2756 } 2757} 2758 2759static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr, 2760 char *buf) 2761{ 2762 int ret; 2763 int i; 2764 for (i = 0; i < 4; i++) { 2765 ret = drm_dp_dpcd_read(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS + (i * 16), &buf[i * 16], 16); 2766 if (ret != 16) 2767 break; 2768 } 2769 if (i == 4) 2770 return true; 2771 return false; 2772} 2773 2774static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr, 2775 struct drm_dp_mst_port *port, char *name, 2776 int namelen) 2777{ 2778 struct edid *mst_edid; 2779 2780 mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port); 2781 drm_edid_get_monitor_name(mst_edid, name, namelen); 2782} 2783 2784/** 2785 * drm_dp_mst_dump_topology(): dump topology to seq file. 2786 * @m: seq_file to dump output to 2787 * @mgr: manager to dump current topology for. 2788 * 2789 * helper to dump MST topology to a seq file for debugfs. 
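 *
 * Minimal usage sketch (editorial addition): typically wired up as a
 * debugfs seq_file show callback; "my_device" and its mst_mgr member are
 * hypothetical driver-private names.
 *
 *	static int my_mst_topology_show(struct seq_file *m, void *data)
 *	{
 *		struct my_device *mydev = m->private;
 *
 *		drm_dp_mst_dump_topology(m, &mydev->mst_mgr);
 *		return 0;
 *	}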
2790 */ 2791void drm_dp_mst_dump_topology(struct seq_file *m, 2792 struct drm_dp_mst_topology_mgr *mgr) 2793{ 2794 int i; 2795 struct drm_dp_mst_port *port; 2796 2797 mutex_lock(&mgr->lock); 2798 if (mgr->mst_primary) 2799 drm_dp_mst_dump_mstb(m, mgr->mst_primary); 2800 2801 /* dump VCPIs */ 2802 mutex_unlock(&mgr->lock); 2803 2804 mutex_lock(&mgr->payload_lock); 2805 seq_printf(m, "vcpi: %lx %lx %d\n", mgr->payload_mask, mgr->vcpi_mask, 2806 mgr->max_payloads); 2807 2808 for (i = 0; i < mgr->max_payloads; i++) { 2809 if (mgr->proposed_vcpis[i]) { 2810 char name[14]; 2811 2812 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); 2813 fetch_monitor_name(mgr, port, name, sizeof(name)); 2814 seq_printf(m, "vcpi %d: %d %d %d sink name: %s\n", i, 2815 port->port_num, port->vcpi.vcpi, 2816 port->vcpi.num_slots, 2817 (*name != 0) ? name : "Unknown"); 2818 } else 2819 seq_printf(m, "vcpi %d:unused\n", i); 2820 } 2821 for (i = 0; i < mgr->max_payloads; i++) { 2822 seq_printf(m, "payload %d: %d, %d, %d\n", 2823 i, 2824 mgr->payloads[i].payload_state, 2825 mgr->payloads[i].start_slot, 2826 mgr->payloads[i].num_slots); 2827 2828 2829 } 2830 mutex_unlock(&mgr->payload_lock); 2831 2832 mutex_lock(&mgr->lock); 2833 if (mgr->mst_primary) { 2834 u8 buf[64]; 2835 bool bret; 2836 int ret; 2837 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE); 2838 seq_printf(m, "dpcd: "); 2839 for (i = 0; i < DP_RECEIVER_CAP_SIZE; i++) 2840 seq_printf(m, "%02x ", buf[i]); 2841 seq_printf(m, "\n"); 2842 ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2); 2843 seq_printf(m, "faux/mst: "); 2844 for (i = 0; i < 2; i++) 2845 seq_printf(m, "%02x ", buf[i]); 2846 seq_printf(m, "\n"); 2847 ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1); 2848 seq_printf(m, "mst ctrl: "); 2849 for (i = 0; i < 1; i++) 2850 seq_printf(m, "%02x ", buf[i]); 2851 seq_printf(m, "\n"); 2852 2853 /* dump the standard OUI branch header */ 2854 ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE); 2855 seq_printf(m, "branch oui: "); 2856 for (i = 0; i < 0x3; i++) 2857 seq_printf(m, "%02x", buf[i]); 2858 seq_printf(m, " devid: "); 2859 for (i = 0x3; i < 0x8 && buf[i]; i++) 2860 seq_printf(m, "%c", buf[i]); 2861 2862 seq_printf(m, " revision: hw: %x.%x sw: %x.%x", buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]); 2863 seq_printf(m, "\n"); 2864 bret = dump_dp_payload_table(mgr, buf); 2865 if (bret == true) { 2866 seq_printf(m, "payload table: "); 2867 for (i = 0; i < 63; i++) 2868 seq_printf(m, "%02x ", buf[i]); 2869 seq_printf(m, "\n"); 2870 } 2871 2872 } 2873 2874 mutex_unlock(&mgr->lock); 2875 2876} 2877EXPORT_SYMBOL(drm_dp_mst_dump_topology); 2878 2879static void drm_dp_tx_work(struct work_struct *work) 2880{ 2881 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work); 2882 2883 mutex_lock(&mgr->qlock); 2884 if (!list_empty(&mgr->tx_msg_downq)) 2885 process_single_down_tx_qlock(mgr); 2886 mutex_unlock(&mgr->qlock); 2887} 2888 2889static void drm_dp_free_mst_port(struct kref *kref) 2890{ 2891 struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref); 2892 kref_put(&port->parent->kref, drm_dp_free_mst_branch_device); 2893 kfree(port); 2894} 2895 2896static void drm_dp_destroy_connector_work(struct work_struct *work) 2897{ 2898 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work); 2899 struct drm_dp_mst_port *port; 2900 bool send_hotplug = false; 2901 /* 
2902 * Not a regular list traverse as we have to drop the destroy 2903 * connector lock before destroying the connector, to avoid AB->BA 2904 * ordering between this lock and the config mutex. 2905 */ 2906 for (;;) { 2907 mutex_lock(&mgr->destroy_connector_lock); 2908 port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next); 2909 if (!port) { 2910 mutex_unlock(&mgr->destroy_connector_lock); 2911 break; 2912 } 2913 list_del(&port->next); 2914 mutex_unlock(&mgr->destroy_connector_lock); 2915 2916 kref_init(&port->kref); 2917 INIT_LIST_HEAD(&port->next); 2918 2919 mgr->cbs->destroy_connector(mgr, port->connector); 2920 2921 drm_dp_port_teardown_pdt(port, port->pdt); 2922 2923 if (!port->input && port->vcpi.vcpi > 0) { 2924 drm_dp_mst_reset_vcpi_slots(mgr, port); 2925 drm_dp_update_payload_part1(mgr); 2926 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); 2927 } 2928 2929 kref_put(&port->kref, drm_dp_free_mst_port); 2930 send_hotplug = true; 2931 } 2932 if (send_hotplug) 2933 (*mgr->cbs->hotplug)(mgr); 2934} 2935 2936/** 2937 * drm_dp_mst_topology_mgr_init - initialise a topology manager 2938 * @mgr: manager struct to initialise 2939 * @dev: device providing this structure - for i2c addition. 2940 * @aux: DP helper aux channel to talk to this device 2941 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit 2942 * @max_payloads: maximum number of payloads this GPU can source 2943 * @conn_base_id: the connector object ID the MST device is connected to. 2944 * 2945 * Return 0 for success, or negative error code on failure 2946 */ 2947int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, 2948 struct device *dev, struct drm_dp_aux *aux, 2949 int max_dpcd_transaction_bytes, 2950 int max_payloads, int conn_base_id) 2951{ 2952 mutex_init(&mgr->lock); 2953 mutex_init(&mgr->qlock); 2954 mutex_init(&mgr->payload_lock); 2955 mutex_init(&mgr->destroy_connector_lock); 2956 INIT_LIST_HEAD(&mgr->tx_msg_downq); 2957 INIT_LIST_HEAD(&mgr->destroy_connector_list); 2958 INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work); 2959 INIT_WORK(&mgr->tx_work, drm_dp_tx_work); 2960 INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work); 2961 init_waitqueue_head(&mgr->tx_waitq); 2962 mgr->dev = dev; 2963 mgr->aux = aux; 2964 mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes; 2965 mgr->max_payloads = max_payloads; 2966 mgr->conn_base_id = conn_base_id; 2967 if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 || 2968 max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8) 2969 return -EINVAL; 2970 mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL); 2971 if (!mgr->payloads) 2972 return -ENOMEM; 2973 mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL); 2974 if (!mgr->proposed_vcpis) 2975 return -ENOMEM; 2976 set_bit(0, &mgr->payload_mask); 2977 if (test_calc_pbn_mode() < 0) 2978 DRM_ERROR("MST PBN self-test failed\n"); 2979 2980 return 0; 2981} 2982EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init); 2983 2984/** 2985 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager. 
2986 * @mgr: manager to destroy 2987 */ 2988void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr) 2989{ 2990 flush_work(&mgr->work); 2991 flush_work(&mgr->destroy_connector_work); 2992 mutex_lock(&mgr->payload_lock); 2993 kfree(mgr->payloads); 2994 mgr->payloads = NULL; 2995 kfree(mgr->proposed_vcpis); 2996 mgr->proposed_vcpis = NULL; 2997 mutex_unlock(&mgr->payload_lock); 2998 mgr->dev = NULL; 2999 mgr->aux = NULL; 3000} 3001EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy); 3002 3003/* I2C device */ 3004static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, 3005 int num) 3006{ 3007 struct drm_dp_aux *aux = adapter->algo_data; 3008 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux); 3009 struct drm_dp_mst_branch *mstb; 3010 struct drm_dp_mst_topology_mgr *mgr = port->mgr; 3011 unsigned int i; 3012 bool reading = false; 3013 struct drm_dp_sideband_msg_req_body msg; 3014 struct drm_dp_sideband_msg_tx *txmsg = NULL; 3015 int ret; 3016 3017 mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); 3018 if (!mstb) 3019 return -EREMOTEIO; 3020 3021 /* construct i2c msg */ 3022 /* see if last msg is a read */ 3023 if (msgs[num - 1].flags & I2C_M_RD) 3024 reading = true; 3025 3026 if (!reading || (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)) { 3027 DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n"); 3028 ret = -EIO; 3029 goto out; 3030 } 3031 3032 memset(&msg, 0, sizeof(msg)); 3033 msg.req_type = DP_REMOTE_I2C_READ; 3034 msg.u.i2c_read.num_transactions = num - 1; 3035 msg.u.i2c_read.port_number = port->port_num; 3036 for (i = 0; i < num - 1; i++) { 3037 msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr; 3038 msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len; 3039 msg.u.i2c_read.transactions[i].bytes = msgs[i].buf; 3040 } 3041 msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr; 3042 msg.u.i2c_read.num_bytes_read = msgs[num - 1].len; 3043 3044 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 3045 if (!txmsg) { 3046 ret = -ENOMEM; 3047 goto out; 3048 } 3049 3050 txmsg->dst = mstb; 3051 drm_dp_encode_sideband_req(&msg, txmsg); 3052 3053 drm_dp_queue_down_tx(mgr, txmsg); 3054 3055 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 3056 if (ret > 0) { 3057 3058 if (txmsg->reply.reply_type == 1) { /* got a NAK back */ 3059 ret = -EREMOTEIO; 3060 goto out; 3061 } 3062 if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) { 3063 ret = -EIO; 3064 goto out; 3065 } 3066 memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len); 3067 ret = num; 3068 } 3069out: 3070 kfree(txmsg); 3071 drm_dp_put_mst_branch_device(mstb); 3072 return ret; 3073} 3074 3075static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter) 3076{ 3077 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | 3078 I2C_FUNC_SMBUS_READ_BLOCK_DATA | 3079 I2C_FUNC_SMBUS_BLOCK_PROC_CALL | 3080 I2C_FUNC_10BIT_ADDR; 3081} 3082 3083static const struct i2c_algorithm drm_dp_mst_i2c_algo = { 3084 .functionality = drm_dp_mst_i2c_functionality, 3085 .master_xfer = drm_dp_mst_i2c_xfer, 3086}; 3087 3088/** 3089 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX 3090 * @aux: DisplayPort AUX channel 3091 * 3092 * Returns 0 on success or a negative error code on failure. 
3093 */ 3094static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux) 3095{ 3096 aux->ddc.algo = &drm_dp_mst_i2c_algo; 3097 aux->ddc.algo_data = aux; 3098 aux->ddc.retries = 3; 3099 3100 aux->ddc.class = I2C_CLASS_DDC; 3101 aux->ddc.owner = THIS_MODULE; 3102 aux->ddc.dev.parent = aux->dev; 3103 aux->ddc.dev.of_node = aux->dev->of_node; 3104 3105 strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev), 3106 sizeof(aux->ddc.name)); 3107 3108 return i2c_add_adapter(&aux->ddc); 3109} 3110 3111/** 3112 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter 3113 * @aux: DisplayPort AUX channel 3114 */ 3115static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux) 3116{ 3117 i2c_del_adapter(&aux->ddc); 3118}
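/*
 * Editorial appendix (not part of the original file): a rough sketch of how
 * a source driver typically drives the exported MST helpers above.  All
 * "my_*" identifiers are hypothetical driver-private names; error handling
 * is omitted.
 *
 * Driver load - one manager per MST-capable DP port:
 *	drm_dp_mst_topology_mgr_init(&my_port->mst_mgr, dev->dev,
 *				     &my_port->aux, 16, 4,
 *				     my_connector->base.id);
 *
 * Long HPD with an MST-capable sink detected in the DPCD:
 *	drm_dp_mst_topology_mgr_set_mst(&my_port->mst_mgr, true);
 *
 * Short HPD pulse - feed the ESI vector to the manager:
 *	drm_dp_mst_hpd_irq(&my_port->mst_mgr, esi, &handled);
 *
 * Modeset enable - reserve bandwidth, then program the payload table:
 *	pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp);
 *	drm_dp_mst_allocate_vcpi(&my_port->mst_mgr, mst_port, pbn, &slots);
 *	drm_dp_update_payload_part1(&my_port->mst_mgr);
 *	... enable the stream and wait for the ACT with
 *	drm_dp_check_act_status(), then ...
 *	drm_dp_update_payload_part2(&my_port->mst_mgr);
 *
 * Modeset disable / unplug:
 *	drm_dp_mst_reset_vcpi_slots(&my_port->mst_mgr, mst_port);
 *	drm_dp_update_payload_part1(&my_port->mst_mgr);
 *	drm_dp_mst_deallocate_vcpi(&my_port->mst_mgr, mst_port);
 *	drm_dp_mst_topology_mgr_set_mst(&my_port->mst_mgr, false);
 *
 * Driver unload:
 *	drm_dp_mst_topology_mgr_destroy(&my_port->mst_mgr);
 */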