Lines Matching +full:i2c +full:- +full:topology

26 #include <linux/i2c.h>
55 * protocol. The helpers contain a topology manager and bandwidth manager.
206 number_of_bits--; in drm_dp_msg_header_crc4()
210 bitshift--; in drm_dp_msg_header_crc4()
222 number_of_bits--; in drm_dp_msg_header_crc4()
240 number_of_bits--; in drm_dp_msg_data_crc4()
244 bitshift--; in drm_dp_msg_data_crc4()
256 number_of_bits--; in drm_dp_msg_data_crc4()
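
The two CRC helpers above run bit-serially: the header variant computes a 4-bit CRC (polynomial x^4 + x + 1, i.e. 0x13 with the implicit top bit) over a count of MSB-first nibbles, and the data variant does the same over whole bytes with an 8-bit polynomial. As a minimal user-space sketch of the header variant (not the driver's code, but equivalent logic):

	#include <stdint.h>

	/* Bit-serial CRC-4 over num_nibbles MSB-first nibbles of data,
	 * polynomial x^4 + x + 1 (0x13 with the implicit top bit). */
	static uint8_t sb_hdr_crc4(const uint8_t *data, int num_nibbles)
	{
		int bits = num_nibbles * 4;
		uint8_t rem = 0;
		int i;

		for (i = 0; i < bits; i++) {
			/* shift in the next message bit, MSB first */
			rem = (rem << 1) | ((data[i / 8] >> (7 - (i % 8))) & 1);
			if (rem & 0x10)
				rem ^= 0x13;
		}
		for (i = 0; i < 4; i++) {	/* flush four zero bits */
			rem <<= 1;
			if (rem & 0x10)
				rem ^= 0x13;
		}
		return rem & 0xf;
	}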
268 size += (hdr->lct / 2); in drm_dp_calc_sb_hdr_size()
279 buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf); in drm_dp_encode_sideband_msg_hdr()
280 for (i = 0; i < (hdr->lct / 2); i++) in drm_dp_encode_sideband_msg_hdr()
281 buf[idx++] = hdr->rad[i]; in drm_dp_encode_sideband_msg_hdr()
282 buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) | in drm_dp_encode_sideband_msg_hdr()
283 (hdr->msg_len & 0x3f); in drm_dp_encode_sideband_msg_hdr()
284 buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4); in drm_dp_encode_sideband_msg_hdr()
286 crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1); in drm_dp_encode_sideband_msg_hdr()
287 buf[idx - 1] |= (crc4 & 0xf); in drm_dp_encode_sideband_msg_hdr()
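
Putting the encoder above together: for a directly attached branch (LCT = 1) there are no RAD bytes, so the header is three bytes and the CRC nibble lands in the low half of the last byte. An illustrative layout, reusing the sb_hdr_crc4() sketch above (field values are made up):

	uint8_t hdr[3];

	hdr[0] = (1 << 4) | 0;			/* LCT = 1, LCR = 0 */
	hdr[1] = (0 << 7) | (0 << 6) | 9;	/* no broadcast, no path msg, msg_len = 9 */
	hdr[2] = (1 << 7) | (1 << 6) | (0 << 4);/* SOMT, EOMT, seqno 0 */
	hdr[2] |= sb_hdr_crc4(hdr, 5);		/* CRC over the first 5 nibbles */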
307 crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1); in drm_dp_decode_sideband_msg_hdr()
309 if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) { in drm_dp_decode_sideband_msg_hdr()
310 drm_dbg_kms(mgr->dev, "crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]); in drm_dp_decode_sideband_msg_hdr()
314 hdr->lct = (buf[0] & 0xf0) >> 4; in drm_dp_decode_sideband_msg_hdr()
315 hdr->lcr = (buf[0] & 0xf); in drm_dp_decode_sideband_msg_hdr()
317 for (i = 0; i < (hdr->lct / 2); i++) in drm_dp_decode_sideband_msg_hdr()
318 hdr->rad[i] = buf[idx++]; in drm_dp_decode_sideband_msg_hdr()
319 hdr->broadcast = (buf[idx] >> 7) & 0x1; in drm_dp_decode_sideband_msg_hdr()
320 hdr->path_msg = (buf[idx] >> 6) & 0x1; in drm_dp_decode_sideband_msg_hdr()
321 hdr->msg_len = buf[idx] & 0x3f; in drm_dp_decode_sideband_msg_hdr()
323 hdr->somt = (buf[idx] >> 7) & 0x1; in drm_dp_decode_sideband_msg_hdr()
324 hdr->eomt = (buf[idx] >> 6) & 0x1; in drm_dp_decode_sideband_msg_hdr()
325 hdr->seqno = (buf[idx] >> 4) & 0x1; in drm_dp_decode_sideband_msg_hdr()
337 u8 *buf = raw->msg; in drm_dp_encode_sideband_req()
339 buf[idx++] = req->req_type & 0x7f; in drm_dp_encode_sideband_req()
341 switch (req->req_type) { in drm_dp_encode_sideband_req()
345 buf[idx] = (req->u.port_num.port_number & 0xf) << 4; in drm_dp_encode_sideband_req()
349 buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 | in drm_dp_encode_sideband_req()
350 (req->u.allocate_payload.number_sdp_streams & 0xf); in drm_dp_encode_sideband_req()
352 buf[idx] = (req->u.allocate_payload.vcpi & 0x7f); in drm_dp_encode_sideband_req()
354 buf[idx] = (req->u.allocate_payload.pbn >> 8); in drm_dp_encode_sideband_req()
356 buf[idx] = (req->u.allocate_payload.pbn & 0xff); in drm_dp_encode_sideband_req()
358 for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) { in drm_dp_encode_sideband_req()
359 buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) | in drm_dp_encode_sideband_req()
360 (req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf); in drm_dp_encode_sideband_req()
363 if (req->u.allocate_payload.number_sdp_streams & 1) { in drm_dp_encode_sideband_req()
364 i = req->u.allocate_payload.number_sdp_streams - 1; in drm_dp_encode_sideband_req()
365 buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4; in drm_dp_encode_sideband_req()
370 buf[idx] = (req->u.query_payload.port_number & 0xf) << 4; in drm_dp_encode_sideband_req()
372 buf[idx] = (req->u.query_payload.vcpi & 0x7f); in drm_dp_encode_sideband_req()
376 buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4; in drm_dp_encode_sideband_req()
377 buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf; in drm_dp_encode_sideband_req()
379 buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8; in drm_dp_encode_sideband_req()
381 buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff); in drm_dp_encode_sideband_req()
383 buf[idx] = (req->u.dpcd_read.num_bytes); in drm_dp_encode_sideband_req()
388 buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4; in drm_dp_encode_sideband_req()
389 buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf; in drm_dp_encode_sideband_req()
391 buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8; in drm_dp_encode_sideband_req()
393 buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff); in drm_dp_encode_sideband_req()
395 buf[idx] = (req->u.dpcd_write.num_bytes); in drm_dp_encode_sideband_req()
397 memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes); in drm_dp_encode_sideband_req()
398 idx += req->u.dpcd_write.num_bytes; in drm_dp_encode_sideband_req()
401 buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4; in drm_dp_encode_sideband_req()
402 buf[idx] |= (req->u.i2c_read.num_transactions & 0x3); in drm_dp_encode_sideband_req()
404 for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) { in drm_dp_encode_sideband_req()
405 buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f; in drm_dp_encode_sideband_req()
407 buf[idx] = req->u.i2c_read.transactions[i].num_bytes; in drm_dp_encode_sideband_req()
409 memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes); in drm_dp_encode_sideband_req()

410 idx += req->u.i2c_read.transactions[i].num_bytes; in drm_dp_encode_sideband_req()
412 buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4; in drm_dp_encode_sideband_req()
413 buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf); in drm_dp_encode_sideband_req()
416 buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f; in drm_dp_encode_sideband_req()
418 buf[idx] = (req->u.i2c_read.num_bytes_read); in drm_dp_encode_sideband_req()
423 buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4; in drm_dp_encode_sideband_req()
425 buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f; in drm_dp_encode_sideband_req()
427 buf[idx] = (req->u.i2c_write.num_bytes); in drm_dp_encode_sideband_req()
429 memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes); in drm_dp_encode_sideband_req()
430 idx += req->u.i2c_write.num_bytes; in drm_dp_encode_sideband_req()
435 msg = &req->u.enc_status; in drm_dp_encode_sideband_req()
436 buf[idx] = msg->stream_id; in drm_dp_encode_sideband_req()
438 memcpy(&buf[idx], msg->client_id, sizeof(msg->client_id)); in drm_dp_encode_sideband_req()
439 idx += sizeof(msg->client_id); in drm_dp_encode_sideband_req()
441 buf[idx] |= FIELD_PREP(GENMASK(1, 0), msg->stream_event); in drm_dp_encode_sideband_req()
442 buf[idx] |= msg->valid_stream_event ? BIT(2) : 0; in drm_dp_encode_sideband_req()
443 buf[idx] |= FIELD_PREP(GENMASK(4, 3), msg->stream_behavior); in drm_dp_encode_sideband_req()
444 buf[idx] |= msg->valid_stream_behavior ? BIT(5) : 0; in drm_dp_encode_sideband_req()
449 raw->cur_len = idx; in drm_dp_encode_sideband_req()
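
For the simple request types the encoded body above is tiny: an opcode byte followed by a port nibble. A hedged sketch of what an ENUM_PATH_RESOURCES request reduces to (opcode 0x10 per the DP MST spec; port number illustrative):

	uint8_t buf[2];
	int port_num = 1;

	buf[0] = 0x10 & 0x7f;		/* DP_ENUM_PATH_RESOURCES */
	buf[1] = (port_num & 0xf) << 4;	/* port number in the high nibble */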
458 const u8 *buf = raw->msg; in drm_dp_decode_sideband_req()
461 req->req_type = buf[idx++] & 0x7f; in drm_dp_decode_sideband_req()
462 switch (req->req_type) { in drm_dp_decode_sideband_req()
466 req->u.port_num.port_number = (buf[idx] >> 4) & 0xf; in drm_dp_decode_sideband_req()
471 &req->u.allocate_payload; in drm_dp_decode_sideband_req()
473 a->number_sdp_streams = buf[idx] & 0xf; in drm_dp_decode_sideband_req()
474 a->port_number = (buf[idx] >> 4) & 0xf; in drm_dp_decode_sideband_req()
477 a->vcpi = buf[idx] & 0x7f; in drm_dp_decode_sideband_req()
479 a->pbn = buf[++idx] << 8; in drm_dp_decode_sideband_req()
480 a->pbn |= buf[++idx]; in drm_dp_decode_sideband_req()
483 for (i = 0; i < a->number_sdp_streams; i++) { in drm_dp_decode_sideband_req()
484 a->sdp_stream_sink[i] = in drm_dp_decode_sideband_req()
490 req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf; in drm_dp_decode_sideband_req()
492 req->u.query_payload.vcpi = buf[idx] & 0x7f; in drm_dp_decode_sideband_req()
496 struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read; in drm_dp_decode_sideband_req()
498 r->port_number = (buf[idx] >> 4) & 0xf; in drm_dp_decode_sideband_req()
500 r->dpcd_address = (buf[idx] << 16) & 0xf0000; in drm_dp_decode_sideband_req()
501 r->dpcd_address |= (buf[++idx] << 8) & 0xff00; in drm_dp_decode_sideband_req()
502 r->dpcd_address |= buf[++idx] & 0xff; in drm_dp_decode_sideband_req()
504 r->num_bytes = buf[++idx]; in drm_dp_decode_sideband_req()
510 &req->u.dpcd_write; in drm_dp_decode_sideband_req()
512 w->port_number = (buf[idx] >> 4) & 0xf; in drm_dp_decode_sideband_req()
514 w->dpcd_address = (buf[idx] << 16) & 0xf0000; in drm_dp_decode_sideband_req()
515 w->dpcd_address |= (buf[++idx] << 8) & 0xff00; in drm_dp_decode_sideband_req()
516 w->dpcd_address |= buf[++idx] & 0xff; in drm_dp_decode_sideband_req()
518 w->num_bytes = buf[++idx]; in drm_dp_decode_sideband_req()
520 w->bytes = kmemdup(&buf[++idx], w->num_bytes, in drm_dp_decode_sideband_req()
522 if (!w->bytes) in drm_dp_decode_sideband_req()
523 return -ENOMEM; in drm_dp_decode_sideband_req()
528 struct drm_dp_remote_i2c_read *r = &req->u.i2c_read; in drm_dp_decode_sideband_req()
532 r->num_transactions = buf[idx] & 0x3; in drm_dp_decode_sideband_req()
533 r->port_number = (buf[idx] >> 4) & 0xf; in drm_dp_decode_sideband_req()
534 for (i = 0; i < r->num_transactions; i++) { in drm_dp_decode_sideband_req()
535 tx = &r->transactions[i]; in drm_dp_decode_sideband_req()
537 tx->i2c_dev_id = buf[++idx] & 0x7f; in drm_dp_decode_sideband_req()
538 tx->num_bytes = buf[++idx]; in drm_dp_decode_sideband_req()
539 tx->bytes = kmemdup(&buf[++idx], in drm_dp_decode_sideband_req()
540 tx->num_bytes, in drm_dp_decode_sideband_req()
542 if (!tx->bytes) { in drm_dp_decode_sideband_req()
546 idx += tx->num_bytes; in drm_dp_decode_sideband_req()
547 tx->no_stop_bit = (buf[idx] >> 5) & 0x1; in drm_dp_decode_sideband_req()
548 tx->i2c_transaction_delay = buf[idx] & 0xf; in drm_dp_decode_sideband_req()
552 for (i = 0; i < r->num_transactions; i++) { in drm_dp_decode_sideband_req()
553 tx = &r->transactions[i]; in drm_dp_decode_sideband_req()
554 kfree(tx->bytes); in drm_dp_decode_sideband_req()
556 return -ENOMEM; in drm_dp_decode_sideband_req()
559 r->read_i2c_device_id = buf[++idx] & 0x7f; in drm_dp_decode_sideband_req()
560 r->num_bytes_read = buf[++idx]; in drm_dp_decode_sideband_req()
565 struct drm_dp_remote_i2c_write *w = &req->u.i2c_write; in drm_dp_decode_sideband_req()
567 w->port_number = (buf[idx] >> 4) & 0xf; in drm_dp_decode_sideband_req()
568 w->write_i2c_device_id = buf[++idx] & 0x7f; in drm_dp_decode_sideband_req()
569 w->num_bytes = buf[++idx]; in drm_dp_decode_sideband_req()
570 w->bytes = kmemdup(&buf[++idx], w->num_bytes, in drm_dp_decode_sideband_req()
572 if (!w->bytes) in drm_dp_decode_sideband_req()
573 return -ENOMEM; in drm_dp_decode_sideband_req()
577 req->u.enc_status.stream_id = buf[idx++]; in drm_dp_decode_sideband_req()
578 for (i = 0; i < sizeof(req->u.enc_status.client_id); i++) in drm_dp_decode_sideband_req()
579 req->u.enc_status.client_id[i] = buf[idx++]; in drm_dp_decode_sideband_req()
581 req->u.enc_status.stream_event = FIELD_GET(GENMASK(1, 0), in drm_dp_decode_sideband_req()
583 req->u.enc_status.valid_stream_event = FIELD_GET(BIT(2), in drm_dp_decode_sideband_req()
585 req->u.enc_status.stream_behavior = FIELD_GET(GENMASK(4, 3), in drm_dp_decode_sideband_req()
587 req->u.enc_status.valid_stream_behavior = FIELD_GET(BIT(5), in drm_dp_decode_sideband_req()
603 if (req->req_type == DP_LINK_ADDRESS) { in drm_dp_dump_sideband_msg_req_body()
605 P("type=%s\n", drm_dp_mst_req_type_str(req->req_type)); in drm_dp_dump_sideband_msg_req_body()
609 P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type)); in drm_dp_dump_sideband_msg_req_body()
612 switch (req->req_type) { in drm_dp_dump_sideband_msg_req_body()
616 P("port=%d\n", req->u.port_num.port_number); in drm_dp_dump_sideband_msg_req_body()
620 req->u.allocate_payload.port_number, in drm_dp_dump_sideband_msg_req_body()
621 req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn, in drm_dp_dump_sideband_msg_req_body()
622 req->u.allocate_payload.number_sdp_streams, in drm_dp_dump_sideband_msg_req_body()
623 req->u.allocate_payload.number_sdp_streams, in drm_dp_dump_sideband_msg_req_body()
624 req->u.allocate_payload.sdp_stream_sink); in drm_dp_dump_sideband_msg_req_body()
628 req->u.query_payload.port_number, in drm_dp_dump_sideband_msg_req_body()
629 req->u.query_payload.vcpi); in drm_dp_dump_sideband_msg_req_body()
633 req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address, in drm_dp_dump_sideband_msg_req_body()
634 req->u.dpcd_read.num_bytes); in drm_dp_dump_sideband_msg_req_body()
638 req->u.dpcd_write.port_number, in drm_dp_dump_sideband_msg_req_body()
639 req->u.dpcd_write.dpcd_address, in drm_dp_dump_sideband_msg_req_body()
640 req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes, in drm_dp_dump_sideband_msg_req_body()
641 req->u.dpcd_write.bytes); in drm_dp_dump_sideband_msg_req_body()
645 req->u.i2c_read.port_number, in drm_dp_dump_sideband_msg_req_body()
646 req->u.i2c_read.num_transactions, in drm_dp_dump_sideband_msg_req_body()
647 req->u.i2c_read.read_i2c_device_id, in drm_dp_dump_sideband_msg_req_body()
648 req->u.i2c_read.num_bytes_read); in drm_dp_dump_sideband_msg_req_body()
651 for (i = 0; i < req->u.i2c_read.num_transactions; i++) { in drm_dp_dump_sideband_msg_req_body()
653 &req->u.i2c_read.transactions[i]; in drm_dp_dump_sideband_msg_req_body()
656 i, rtx->i2c_dev_id, rtx->num_bytes, in drm_dp_dump_sideband_msg_req_body()
657 rtx->no_stop_bit, rtx->i2c_transaction_delay, in drm_dp_dump_sideband_msg_req_body()
658 rtx->num_bytes, rtx->bytes); in drm_dp_dump_sideband_msg_req_body()
663 req->u.i2c_write.port_number, in drm_dp_dump_sideband_msg_req_body()
664 req->u.i2c_write.write_i2c_device_id, in drm_dp_dump_sideband_msg_req_body()
665 req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes, in drm_dp_dump_sideband_msg_req_body()
666 req->u.i2c_write.bytes); in drm_dp_dump_sideband_msg_req_body()
671 req->u.enc_status.stream_id, in drm_dp_dump_sideband_msg_req_body()
672 (int)ARRAY_SIZE(req->u.enc_status.client_id), in drm_dp_dump_sideband_msg_req_body()
673 req->u.enc_status.client_id, req->u.enc_status.stream_event, in drm_dp_dump_sideband_msg_req_body()
674 req->u.enc_status.valid_stream_event, in drm_dp_dump_sideband_msg_req_body()
675 req->u.enc_status.stream_behavior, in drm_dp_dump_sideband_msg_req_body()
676 req->u.enc_status.valid_stream_behavior); in drm_dp_dump_sideband_msg_req_body()
695 drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf, in drm_dp_mst_dump_sideband_msg_tx()
698 txmsg->cur_offset, txmsg->cur_len, txmsg->seqno, in drm_dp_mst_dump_sideband_msg_tx()
699 drm_dp_mst_sideband_tx_state_str(txmsg->state), in drm_dp_mst_dump_sideband_msg_tx()
700 txmsg->path_msg, buf); in drm_dp_mst_dump_sideband_msg_tx()
735 u8 *buf = raw->msg; in drm_dp_encode_sideband_reply()
737 buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f); in drm_dp_encode_sideband_reply()
739 raw->cur_len = idx; in drm_dp_encode_sideband_reply()
747 * ignore out-of-order messages or messages that are part of a in drm_dp_sideband_msg_set_header()
750 if (!hdr->somt && !msg->have_somt) in drm_dp_sideband_msg_set_header()
754 msg->curchunk_idx = 0; in drm_dp_sideband_msg_set_header()
755 msg->curchunk_len = hdr->msg_len; in drm_dp_sideband_msg_set_header()
756 msg->curchunk_hdrlen = hdrlen; in drm_dp_sideband_msg_set_header()
758 /* we have already gotten an somt - don't bother parsing */ in drm_dp_sideband_msg_set_header()
759 if (hdr->somt && msg->have_somt) in drm_dp_sideband_msg_set_header()
762 if (hdr->somt) { in drm_dp_sideband_msg_set_header()
763 memcpy(&msg->initial_hdr, hdr, in drm_dp_sideband_msg_set_header()
765 msg->have_somt = true; in drm_dp_sideband_msg_set_header()
767 if (hdr->eomt) in drm_dp_sideband_msg_set_header()
768 msg->have_eomt = true; in drm_dp_sideband_msg_set_header()
779 memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen); in drm_dp_sideband_append_payload()
780 msg->curchunk_idx += replybuflen; in drm_dp_sideband_append_payload()
782 if (msg->curchunk_idx >= msg->curchunk_len) { in drm_dp_sideband_append_payload()
784 crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1); in drm_dp_sideband_append_payload()
785 if (crc4 != msg->chunk[msg->curchunk_len - 1]) in drm_dp_sideband_append_payload()
788 msg->chunk, msg->curchunk_len, false); in drm_dp_sideband_append_payload()
790 memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1); in drm_dp_sideband_append_payload()
791 msg->curlen += msg->curchunk_len - 1; in drm_dp_sideband_append_payload()
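
Taken together, set_header() and append_payload() reassemble a message from up-to-48-byte chunks: SOMT marks the first chunk, EOMT the last, and each chunk's final payload byte is a CRC that is verified and stripped before the chunk is appended. A toy user-space reassembler with the same shape (hypothetical types, CRC check omitted):

	#include <stdbool.h>
	#include <stdint.h>
	#include <string.h>

	struct toy_msg {
		uint8_t buf[256];
		int len;
		bool have_somt, have_eomt;
	};

	static bool toy_append_chunk(struct toy_msg *m, bool somt, bool eomt,
				     const uint8_t *payload, int len)
	{
		if (somt) {			/* first chunk (re)starts the message */
			m->len = 0;
			m->have_somt = true;
		} else if (!m->have_somt) {
			return false;		/* out-of-order chunk, drop it */
		}
		if (m->len + len - 1 > (int)sizeof(m->buf))
			return false;		/* would overflow the buffer */
		memcpy(&m->buf[m->len], payload, len - 1); /* strip the CRC byte */
		m->len += len - 1;
		if (eomt)
			m->have_eomt = true;	/* message complete */
		return true;
	}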
803 memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16); in drm_dp_sideband_parse_link_address()
805 repmsg->u.link_addr.nports = raw->msg[idx] & 0xf; in drm_dp_sideband_parse_link_address()
807 if (idx > raw->curlen) in drm_dp_sideband_parse_link_address()
809 for (i = 0; i < repmsg->u.link_addr.nports; i++) { in drm_dp_sideband_parse_link_address()
810 if (raw->msg[idx] & 0x80) in drm_dp_sideband_parse_link_address()
811 repmsg->u.link_addr.ports[i].input_port = 1; in drm_dp_sideband_parse_link_address()
813 repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7; in drm_dp_sideband_parse_link_address()
814 repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf); in drm_dp_sideband_parse_link_address()
817 if (idx > raw->curlen) in drm_dp_sideband_parse_link_address()
819 repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1; in drm_dp_sideband_parse_link_address()
820 repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1; in drm_dp_sideband_parse_link_address()
821 if (repmsg->u.link_addr.ports[i].input_port == 0) in drm_dp_sideband_parse_link_address()
822 repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1; in drm_dp_sideband_parse_link_address()
824 if (idx > raw->curlen) in drm_dp_sideband_parse_link_address()
826 if (repmsg->u.link_addr.ports[i].input_port == 0) { in drm_dp_sideband_parse_link_address()
827 repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]); in drm_dp_sideband_parse_link_address()
829 if (idx > raw->curlen) in drm_dp_sideband_parse_link_address()
831 memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16); in drm_dp_sideband_parse_link_address()
833 if (idx > raw->curlen) in drm_dp_sideband_parse_link_address()
835 repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf; in drm_dp_sideband_parse_link_address()
836 repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf); in drm_dp_sideband_parse_link_address()
840 if (idx > raw->curlen) in drm_dp_sideband_parse_link_address()
846 DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen); in drm_dp_sideband_parse_link_address()
855 repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf; in drm_dp_sideband_parse_remote_dpcd_read()
857 if (idx > raw->curlen) in drm_dp_sideband_parse_remote_dpcd_read()
859 repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx]; in drm_dp_sideband_parse_remote_dpcd_read()
861 if (idx > raw->curlen) in drm_dp_sideband_parse_remote_dpcd_read()
864 memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes); in drm_dp_sideband_parse_remote_dpcd_read()
867 DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen); in drm_dp_sideband_parse_remote_dpcd_read()
876 repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf; in drm_dp_sideband_parse_remote_dpcd_write()
878 if (idx > raw->curlen) in drm_dp_sideband_parse_remote_dpcd_write()
882 DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen); in drm_dp_sideband_parse_remote_dpcd_write()
891 repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf); in drm_dp_sideband_parse_remote_i2c_read_ack()
893 if (idx > raw->curlen) in drm_dp_sideband_parse_remote_i2c_read_ack()
895 repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx]; in drm_dp_sideband_parse_remote_i2c_read_ack()
898 memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes); in drm_dp_sideband_parse_remote_i2c_read_ack()
901 DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen); in drm_dp_sideband_parse_remote_i2c_read_ack()
910 repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf; in drm_dp_sideband_parse_enum_path_resources_ack()
911 repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1; in drm_dp_sideband_parse_enum_path_resources_ack()
913 if (idx > raw->curlen) in drm_dp_sideband_parse_enum_path_resources_ack()
915 repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]); in drm_dp_sideband_parse_enum_path_resources_ack()
917 if (idx > raw->curlen) in drm_dp_sideband_parse_enum_path_resources_ack()
919 repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]); in drm_dp_sideband_parse_enum_path_resources_ack()
921 if (idx > raw->curlen) in drm_dp_sideband_parse_enum_path_resources_ack()
925 DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen); in drm_dp_sideband_parse_enum_path_resources_ack()
934 repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf; in drm_dp_sideband_parse_allocate_payload_ack()
936 if (idx > raw->curlen) in drm_dp_sideband_parse_allocate_payload_ack()
938 repmsg->u.allocate_payload.vcpi = raw->msg[idx]; in drm_dp_sideband_parse_allocate_payload_ack()
940 if (idx > raw->curlen) in drm_dp_sideband_parse_allocate_payload_ack()
942 repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]); in drm_dp_sideband_parse_allocate_payload_ack()
944 if (idx > raw->curlen) in drm_dp_sideband_parse_allocate_payload_ack()
948 DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen); in drm_dp_sideband_parse_allocate_payload_ack()
957 repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf; in drm_dp_sideband_parse_query_payload_ack()
959 if (idx > raw->curlen) in drm_dp_sideband_parse_query_payload_ack()
961 repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]); in drm_dp_sideband_parse_query_payload_ack()
963 if (idx > raw->curlen) in drm_dp_sideband_parse_query_payload_ack()
967 DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen); in drm_dp_sideband_parse_query_payload_ack()
976 repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf; in drm_dp_sideband_parse_power_updown_phy_ack()
978 if (idx > raw->curlen) { in drm_dp_sideband_parse_power_updown_phy_ack()
980 idx, raw->curlen); in drm_dp_sideband_parse_power_updown_phy_ack()
993 reply = &repmsg->u.enc_status; in drm_dp_sideband_parse_query_stream_enc_status()
995 reply->stream_id = raw->msg[3]; in drm_dp_sideband_parse_query_stream_enc_status()
997 reply->reply_signed = raw->msg[2] & BIT(0); in drm_dp_sideband_parse_query_stream_enc_status()
1007 reply->hdcp_1x_device_present = raw->msg[2] & BIT(4); in drm_dp_sideband_parse_query_stream_enc_status()
1008 reply->hdcp_2x_device_present = raw->msg[2] & BIT(3); in drm_dp_sideband_parse_query_stream_enc_status()
1010 reply->query_capable_device_present = raw->msg[2] & BIT(5); in drm_dp_sideband_parse_query_stream_enc_status()
1011 reply->legacy_device_present = raw->msg[2] & BIT(6); in drm_dp_sideband_parse_query_stream_enc_status()
1012 reply->unauthorizable_device_present = raw->msg[2] & BIT(7); in drm_dp_sideband_parse_query_stream_enc_status()
1014 reply->auth_completed = !!(raw->msg[1] & BIT(3)); in drm_dp_sideband_parse_query_stream_enc_status()
1015 reply->encryption_enabled = !!(raw->msg[1] & BIT(4)); in drm_dp_sideband_parse_query_stream_enc_status()
1016 reply->repeater_present = !!(raw->msg[1] & BIT(5)); in drm_dp_sideband_parse_query_stream_enc_status()
1017 reply->state = (raw->msg[1] & GENMASK(7, 6)) >> 6; in drm_dp_sideband_parse_query_stream_enc_status()
1027 msg->reply_type = (raw->msg[0] & 0x80) >> 7; in drm_dp_sideband_parse_reply()
1028 msg->req_type = (raw->msg[0] & 0x7f); in drm_dp_sideband_parse_reply()
1030 if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) { in drm_dp_sideband_parse_reply()
1031 memcpy(msg->u.nak.guid, &raw->msg[1], 16); in drm_dp_sideband_parse_reply()
1032 msg->u.nak.reason = raw->msg[17]; in drm_dp_sideband_parse_reply()
1033 msg->u.nak.nak_data = raw->msg[18]; in drm_dp_sideband_parse_reply()
1037 switch (msg->req_type) { in drm_dp_sideband_parse_reply()
1062 drm_err(mgr->dev, "Got unknown reply 0x%02x (%s)\n", in drm_dp_sideband_parse_reply()
1063 msg->req_type, drm_dp_mst_req_type_str(msg->req_type)); in drm_dp_sideband_parse_reply()
1075 msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4; in drm_dp_sideband_parse_connection_status_notify()
1077 if (idx > raw->curlen) in drm_dp_sideband_parse_connection_status_notify()
1080 memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16); in drm_dp_sideband_parse_connection_status_notify()
1082 if (idx > raw->curlen) in drm_dp_sideband_parse_connection_status_notify()
1085 msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1; in drm_dp_sideband_parse_connection_status_notify()
1086 msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1; in drm_dp_sideband_parse_connection_status_notify()
1087 msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1; in drm_dp_sideband_parse_connection_status_notify()
1088 msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1; in drm_dp_sideband_parse_connection_status_notify()
1089 msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7); in drm_dp_sideband_parse_connection_status_notify()
1093 drm_dbg_kms(mgr->dev, "connection status reply parse length fail %d %d\n", in drm_dp_sideband_parse_connection_status_notify()
1094 idx, raw->curlen); in drm_dp_sideband_parse_connection_status_notify()
1104 msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4; in drm_dp_sideband_parse_resource_status_notify()
1106 if (idx > raw->curlen) in drm_dp_sideband_parse_resource_status_notify()
1109 memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16); in drm_dp_sideband_parse_resource_status_notify()
1111 if (idx > raw->curlen) in drm_dp_sideband_parse_resource_status_notify()
1114 msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]); in drm_dp_sideband_parse_resource_status_notify()
1118 drm_dbg_kms(mgr->dev, "resource status reply parse length fail %d %d\n", idx, raw->curlen); in drm_dp_sideband_parse_resource_status_notify()
1127 msg->req_type = (raw->msg[0] & 0x7f); in drm_dp_sideband_parse_req()
1129 switch (msg->req_type) { in drm_dp_sideband_parse_req()
1135 drm_err(mgr->dev, "Got unknown request 0x%02x (%s)\n", in drm_dp_sideband_parse_req()
1136 msg->req_type, drm_dp_mst_req_type_str(msg->req_type)); in drm_dp_sideband_parse_req()
1168 msg->path_msg = true; in build_clear_payload_id_table()
1179 msg->path_msg = true; in build_enum_path_resources()
1200 msg->path_msg = true; in build_allocate_payload()
1215 msg->path_msg = true; in build_power_updown_phy()
1242 mutex_lock(&mgr->payload_lock); in drm_dp_mst_assign_payload_id()
1243 ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1); in drm_dp_mst_assign_payload_id()
1244 if (ret > mgr->max_payloads) { in drm_dp_mst_assign_payload_id()
1245 ret = -EINVAL; in drm_dp_mst_assign_payload_id()
1246 drm_dbg_kms(mgr->dev, "out of payload ids %d\n", ret); in drm_dp_mst_assign_payload_id()
1250 vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1); in drm_dp_mst_assign_payload_id()
1251 if (vcpi_ret > mgr->max_payloads) { in drm_dp_mst_assign_payload_id()
1252 ret = -EINVAL; in drm_dp_mst_assign_payload_id()
1253 drm_dbg_kms(mgr->dev, "out of vcpi ids %d\n", ret); in drm_dp_mst_assign_payload_id()
1257 set_bit(ret, &mgr->payload_mask); in drm_dp_mst_assign_payload_id()
1258 set_bit(vcpi_ret, &mgr->vcpi_mask); in drm_dp_mst_assign_payload_id()
1259 vcpi->vcpi = vcpi_ret + 1; in drm_dp_mst_assign_payload_id()
1260 mgr->proposed_vcpis[ret - 1] = vcpi; in drm_dp_mst_assign_payload_id()
1262 mutex_unlock(&mgr->payload_lock); in drm_dp_mst_assign_payload_id()
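
The allocator above simply claims the first clear bit in each of two bitmasks (payload IDs and VCPIs, both handed out 1-based). A standalone sketch of that pattern, mirroring the find_first_zero_bit()/set_bit() pair without the driver's locking:

	/* Toy 1-based ID allocator over a bitmask. */
	static int toy_alloc_id(unsigned long *mask, int max)
	{
		int id;

		for (id = 0; id < max; id++) {
			if (!(*mask & (1UL << id))) {
				*mask |= 1UL << id;	/* claim it */
				return id + 1;		/* IDs start at 1 */
			}
		}
		return -1;				/* out of IDs */
	}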
1274 mutex_lock(&mgr->payload_lock); in drm_dp_mst_put_payload_id()
1275 drm_dbg_kms(mgr->dev, "putting payload %d\n", vcpi); in drm_dp_mst_put_payload_id()
1276 clear_bit(vcpi - 1, &mgr->vcpi_mask); in drm_dp_mst_put_payload_id()
1278 for (i = 0; i < mgr->max_payloads; i++) { in drm_dp_mst_put_payload_id()
1279 if (mgr->proposed_vcpis[i] && in drm_dp_mst_put_payload_id()
1280 mgr->proposed_vcpis[i]->vcpi == vcpi) { in drm_dp_mst_put_payload_id()
1281 mgr->proposed_vcpis[i] = NULL; in drm_dp_mst_put_payload_id()
1282 clear_bit(i + 1, &mgr->payload_mask); in drm_dp_mst_put_payload_id()
1285 mutex_unlock(&mgr->payload_lock); in drm_dp_mst_put_payload_id()
1294 * All updates to txmsg->state are protected by mgr->qlock, and the two in check_txmsg_state()
1298 state = READ_ONCE(txmsg->state); in check_txmsg_state()
1306 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; in drm_dp_mst_wait_tx_reply()
1314 * poll-waiting for the MST reply interrupt if we didn't receive in drm_dp_mst_wait_tx_reply()
1318 * Club 3D CAC-1557 TypeC -> DP adapter which for some reason in drm_dp_mst_wait_tx_reply()
1325 ret = wait_event_timeout(mgr->tx_waitq, in drm_dp_mst_wait_tx_reply()
1327 mgr->cbs->poll_hpd_irq ? in drm_dp_mst_wait_tx_reply()
1331 if (ret || !mgr->cbs->poll_hpd_irq || in drm_dp_mst_wait_tx_reply()
1335 mgr->cbs->poll_hpd_irq(mgr); in drm_dp_mst_wait_tx_reply()
1338 mutex_lock(&mgr->qlock); in drm_dp_mst_wait_tx_reply()
1340 if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) { in drm_dp_mst_wait_tx_reply()
1341 ret = -EIO; in drm_dp_mst_wait_tx_reply()
1345 drm_dbg_kms(mgr->dev, "timedout msg send %p %d %d\n", in drm_dp_mst_wait_tx_reply()
1346 txmsg, txmsg->state, txmsg->seqno); in drm_dp_mst_wait_tx_reply()
1349 ret = -EIO; in drm_dp_mst_wait_tx_reply()
1352 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED || in drm_dp_mst_wait_tx_reply()
1353 txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND || in drm_dp_mst_wait_tx_reply()
1354 txmsg->state == DRM_DP_SIDEBAND_TX_SENT) in drm_dp_mst_wait_tx_reply()
1355 list_del(&txmsg->next); in drm_dp_mst_wait_tx_reply()
1358 if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) { in drm_dp_mst_wait_tx_reply()
1363 mutex_unlock(&mgr->qlock); in drm_dp_mst_wait_tx_reply()
1377 mstb->lct = lct; in drm_dp_add_mst_branch_device()
1379 memcpy(mstb->rad, rad, lct / 2); in drm_dp_add_mst_branch_device()
1380 INIT_LIST_HEAD(&mstb->ports); in drm_dp_add_mst_branch_device()
1381 kref_init(&mstb->topology_kref); in drm_dp_add_mst_branch_device()
1382 kref_init(&mstb->malloc_kref); in drm_dp_add_mst_branch_device()
1391 if (mstb->port_parent) in drm_dp_free_mst_branch_device()
1392 drm_dp_mst_put_port_malloc(mstb->port_parent); in drm_dp_free_mst_branch_device()
1400 * Topology refcount overview
1405 * two different kinds of refcounts: topology refcounts, and malloc refcounts.
1407 * Topology refcounts are not exposed to drivers, and are handled internally
1409 * in-memory topology state from being changed in the middle of critical
1412 * of the topology until its topology refcount reaches zero. Additionally,
1420 * drm_dp_mst_branch allocated even after all of its topology references have
1422 * branch's last known state before it was disconnected from the topology.
1430 * helpers. Exposing this API to drivers in a race-free manner would take more
1434 * Refcount relationships in a topology
1437 * Let's take a look at why the relationship between topology and malloc
1440 * .. kernel-figure:: dp-mst/topology-figure-1.dot
1442 * An example of topology and malloc refs in a DP MST topology with two
1443 * active payloads. Topology refcount increments are indicated by solid
1449 * As you can see in the above figure, every branch increments the topology
1456 * topology would start to look like the figure below.
1458 * .. kernel-figure:: dp-mst/topology-figure-2.dot
1463 * Whenever a port or branch device's topology refcount reaches zero, it will
1464 * decrement the topology refcounts of all its children, the malloc refcount
1466 * #4, this means they both have been disconnected from the topology and freed
1468 * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
1476 * connected to the topology. In this case, we would travel up the topology as
1479 * .. kernel-figure:: dp-mst/topology-figure-3.dot
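
In practice the driver-facing half of this scheme is the malloc refcount API below: a driver that stashes a struct drm_dp_mst_port pointer somewhere long-lived takes a malloc reference for it and drops it when the pointer dies. A sketch of that pattern (the my_mst_connector struct is hypothetical; the get/put helpers are the ones documented below):

	struct my_mst_connector {		/* hypothetical driver state */
		struct drm_dp_mst_port *port;
	};

	static void my_mst_connector_init(struct my_mst_connector *conn,
					  struct drm_dp_mst_port *port)
	{
		conn->port = port;
		drm_dp_mst_get_port_malloc(port);	/* keep the allocation alive */
	}

	static void my_mst_connector_destroy(struct my_mst_connector *conn)
	{
		drm_dp_mst_put_port_malloc(conn->port);	/* may free the port */
		conn->port = NULL;
	}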
1486 * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
1499 kref_get(&mstb->malloc_kref); in drm_dp_mst_get_mstb_malloc()
1500 drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref)); in drm_dp_mst_get_mstb_malloc()
1504 * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
1517 drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1); in drm_dp_mst_put_mstb_malloc()
1518 kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device); in drm_dp_mst_put_mstb_malloc()
1526 drm_dp_mst_put_mstb_malloc(port->parent); in drm_dp_free_mst_port()
1531 * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
1550 kref_get(&port->malloc_kref); in drm_dp_mst_get_port_malloc()
1551 drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->malloc_kref)); in drm_dp_mst_get_port_malloc()
1556 * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
1568 drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1); in drm_dp_mst_put_port_malloc()
1569 kref_put(&port->malloc_kref, drm_dp_free_mst_port); in drm_dp_mst_put_port_malloc()
1594 for (i = 0; i < history->len; i++) { in __topology_ref_save()
1595 if (history->entries[i].backtrace == backtrace) { in __topology_ref_save()
1596 entry = &history->entries[i]; in __topology_ref_save()
1604 int new_len = history->len + 1; in __topology_ref_save()
1606 new = krealloc(history->entries, sizeof(*new) * new_len, in __topology_ref_save()
1611 entry = &new[history->len]; in __topology_ref_save()
1612 history->len = new_len; in __topology_ref_save()
1613 history->entries = new; in __topology_ref_save()
1615 entry->backtrace = backtrace; in __topology_ref_save()
1616 entry->type = type; in __topology_ref_save()
1617 entry->count = 0; in __topology_ref_save()
1619 entry->count++; in __topology_ref_save()
1620 entry->ts_nsec = ktime_get_ns(); in __topology_ref_save()
1628 if (entry_a->ts_nsec > entry_b->ts_nsec) in topology_ref_history_cmp()
1630 else if (entry_a->ts_nsec < entry_b->ts_nsec) in topology_ref_history_cmp()
1631 return -1; in topology_ref_history_cmp()
1656 if (!history->len) in __dump_topology_ref_history()
1662 sort(history->entries, history->len, sizeof(*history->entries), in __dump_topology_ref_history()
1665 drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n", in __dump_topology_ref_history()
1668 for (i = 0; i < history->len; i++) { in __dump_topology_ref_history()
1670 &history->entries[i]; in __dump_topology_ref_history()
1673 u64 ts_nsec = entry->ts_nsec; in __dump_topology_ref_history()
1676 nr_entries = stack_depot_fetch(entry->backtrace, &entries); in __dump_topology_ref_history()
1680 entry->count, in __dump_topology_ref_history()
1681 topology_ref_type_to_str(entry->type), in __dump_topology_ref_history()
1686 kfree(history->entries); in __dump_topology_ref_history()
1694 __dump_topology_ref_history(&mstb->topology_ref_history, mstb, in drm_dp_mst_dump_mstb_topology_history()
1701 __dump_topology_ref_history(&port->topology_ref_history, port, in drm_dp_mst_dump_port_topology_history()
1709 __topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type); in save_mstb_topology_ref()
1716 __topology_ref_save(port->mgr, &port->topology_ref_history, type); in save_port_topology_ref()
1722 mutex_lock(&mgr->topology_ref_history_lock); in topology_ref_history_lock()
1728 mutex_unlock(&mgr->topology_ref_history_lock); in topology_ref_history_unlock()
1747 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; in drm_dp_destroy_mst_branch_device()
1751 INIT_LIST_HEAD(&mstb->destroy_next); in drm_dp_destroy_mst_branch_device()
1754 * This can get called under mgr->mutex, so we need to perform the in drm_dp_destroy_mst_branch_device()
1757 mutex_lock(&mgr->delayed_destroy_lock); in drm_dp_destroy_mst_branch_device()
1758 list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list); in drm_dp_destroy_mst_branch_device()
1759 mutex_unlock(&mgr->delayed_destroy_lock); in drm_dp_destroy_mst_branch_device()
1760 queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work); in drm_dp_destroy_mst_branch_device()
1764 * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
1766 * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
1768 * Attempts to grab a topology reference to @mstb, if it hasn't yet been
1769 * removed from the topology (e.g. &drm_dp_mst_branch.topology_kref has
1770 * reached 0). Holding a topology reference implies that a malloc reference
1771 * will be held to @mstb as long as the user holds the topology reference.
1774 * reference to @mstb. If you already have a topology reference to @mstb, you
1782 * * 1: A topology reference was grabbed successfully
1783 * * 0: @port is no longer in the topology, no reference was grabbed
1790 topology_ref_history_lock(mstb->mgr); in drm_dp_mst_topology_try_get_mstb()
1791 ret = kref_get_unless_zero(&mstb->topology_kref); in drm_dp_mst_topology_try_get_mstb()
1793 drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref)); in drm_dp_mst_topology_try_get_mstb()
1797 topology_ref_history_unlock(mstb->mgr); in drm_dp_mst_topology_try_get_mstb()
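
Internally this enables the usual check-before-use pattern: a nonzero return means the caller now owns a topology reference and must balance it with drm_dp_mst_topology_put_mstb(). A sketch (do_something() is hypothetical):

	if (drm_dp_mst_topology_try_get_mstb(mstb)) {
		/* mstb cannot be torn down in this window */
		do_something(mstb);
		drm_dp_mst_topology_put_mstb(mstb);
	}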
1803 * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
1805 * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
1809 * you are already guaranteed to have at least one active topology reference
1818 topology_ref_history_lock(mstb->mgr); in drm_dp_mst_topology_get_mstb()
1821 WARN_ON(kref_read(&mstb->topology_kref) == 0); in drm_dp_mst_topology_get_mstb()
1822 kref_get(&mstb->topology_kref); in drm_dp_mst_topology_get_mstb()
1823 drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref)); in drm_dp_mst_topology_get_mstb()
1825 topology_ref_history_unlock(mstb->mgr); in drm_dp_mst_topology_get_mstb()
1829 * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
1831 * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
1833 * Releases a topology reference from @mstb by decrementing
1843 topology_ref_history_lock(mstb->mgr); in drm_dp_mst_topology_put_mstb()
1845 drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref) - 1); in drm_dp_mst_topology_put_mstb()
1848 topology_ref_history_unlock(mstb->mgr); in drm_dp_mst_topology_put_mstb()
1849 kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device); in drm_dp_mst_topology_put_mstb()
1856 struct drm_dp_mst_topology_mgr *mgr = port->mgr; in drm_dp_destroy_port()
1861 if (port->input) { in drm_dp_destroy_port()
1866 kfree(port->cached_edid); in drm_dp_destroy_port()
1872 mutex_lock(&mgr->delayed_destroy_lock); in drm_dp_destroy_port()
1873 list_add(&port->next, &mgr->destroy_port_list); in drm_dp_destroy_port()
1874 mutex_unlock(&mgr->delayed_destroy_lock); in drm_dp_destroy_port()
1875 queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work); in drm_dp_destroy_port()
1879 * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
1881 * @port: &struct drm_dp_mst_port to increment the topology refcount of
1883 * Attempts to grab a topology reference to @port, if it hasn't yet been
1884 * removed from the topology (e.g. &drm_dp_mst_port.topology_kref has reached
1885 * 0). Holding a topology reference implies that a malloc reference will be
1886 * held to @port as long as the user holds the topology reference.
1889 * reference to @port. If you already have a topology reference to @port, you
1897 * * 1: A topology reference was grabbed successfully
1898 * * 0: @port is no longer in the topology, no reference was grabbed
1905 topology_ref_history_lock(port->mgr); in drm_dp_mst_topology_try_get_port()
1906 ret = kref_get_unless_zero(&port->topology_kref); in drm_dp_mst_topology_try_get_port()
1908 drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref)); in drm_dp_mst_topology_try_get_port()
1912 topology_ref_history_unlock(port->mgr); in drm_dp_mst_topology_try_get_port()
1917 * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
1918 * @port: The &struct drm_dp_mst_port to increment the topology refcount of
1922 * you are already guaranteed to have at least one active topology reference
1931 topology_ref_history_lock(port->mgr); in drm_dp_mst_topology_get_port()
1933 WARN_ON(kref_read(&port->topology_kref) == 0); in drm_dp_mst_topology_get_port()
1934 kref_get(&port->topology_kref); in drm_dp_mst_topology_get_port()
1935 drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref)); in drm_dp_mst_topology_get_port()
1938 topology_ref_history_unlock(port->mgr); in drm_dp_mst_topology_get_port()
1942 * drm_dp_mst_topology_put_port() - release a topology reference to a port
1943 * @port: The &struct drm_dp_mst_port to release the topology reference from
1945 * Releases a topology reference from @port by decrementing
1954 topology_ref_history_lock(port->mgr); in drm_dp_mst_topology_put_port()
1956 drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref) - 1); in drm_dp_mst_topology_put_port()
1959 topology_ref_history_unlock(port->mgr); in drm_dp_mst_topology_put_port()
1960 kref_put(&port->topology_kref, drm_dp_destroy_port); in drm_dp_mst_topology_put_port()
1973 list_for_each_entry(port, &mstb->ports, next) { in drm_dp_mst_topology_get_mstb_validated_locked()
1974 if (port->mstb) { in drm_dp_mst_topology_get_mstb_validated_locked()
1976 port->mstb, to_find); in drm_dp_mst_topology_get_mstb_validated_locked()
1990 mutex_lock(&mgr->lock); in drm_dp_mst_topology_get_mstb_validated()
1991 if (mgr->mst_primary) { in drm_dp_mst_topology_get_mstb_validated()
1993 mgr->mst_primary, mstb); in drm_dp_mst_topology_get_mstb_validated()
1998 mutex_unlock(&mgr->lock); in drm_dp_mst_topology_get_mstb_validated()
2008 list_for_each_entry(port, &mstb->ports, next) { in drm_dp_mst_topology_get_port_validated_locked()
2012 if (port->mstb) { in drm_dp_mst_topology_get_port_validated_locked()
2014 port->mstb, to_find); in drm_dp_mst_topology_get_port_validated_locked()
2028 mutex_lock(&mgr->lock); in drm_dp_mst_topology_get_port_validated()
2029 if (mgr->mst_primary) { in drm_dp_mst_topology_get_port_validated()
2031 mgr->mst_primary, port); in drm_dp_mst_topology_get_port_validated()
2036 mutex_unlock(&mgr->lock); in drm_dp_mst_topology_get_port_validated()
2045 list_for_each_entry(port, &mstb->ports, next) { in drm_dp_get_port()
2046 if (port->port_num == port_num) { in drm_dp_get_port()
2063 int parent_lct = port->parent->lct; in drm_dp_calculate_rad()
2065 int idx = (parent_lct - 1) / 2; in drm_dp_calculate_rad()
2068 memcpy(rad, port->parent->rad, idx + 1); in drm_dp_calculate_rad()
2073 rad[idx] |= port->port_num << shift; in drm_dp_calculate_rad()
2097 struct drm_dp_mst_topology_mgr *mgr = port->mgr; in drm_dp_port_set_pdt()
2102 if (port->pdt == new_pdt && port->mcs == new_mcs) in drm_dp_port_set_pdt()
2106 if (port->pdt != DP_PEER_DEVICE_NONE) { in drm_dp_port_set_pdt()
2107 if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) { in drm_dp_port_set_pdt()
2109 * If the new PDT would also have an i2c bus, in drm_dp_port_set_pdt()
2114 port->pdt = new_pdt; in drm_dp_port_set_pdt()
2115 port->mcs = new_mcs; in drm_dp_port_set_pdt()
2119 /* remove i2c over sideband */ in drm_dp_port_set_pdt()
2122 mutex_lock(&mgr->lock); in drm_dp_port_set_pdt()
2123 drm_dp_mst_topology_put_mstb(port->mstb); in drm_dp_port_set_pdt()
2124 port->mstb = NULL; in drm_dp_port_set_pdt()
2125 mutex_unlock(&mgr->lock); in drm_dp_port_set_pdt()
2129 port->pdt = new_pdt; in drm_dp_port_set_pdt()
2130 port->mcs = new_mcs; in drm_dp_port_set_pdt()
2132 if (port->pdt != DP_PEER_DEVICE_NONE) { in drm_dp_port_set_pdt()
2133 if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) { in drm_dp_port_set_pdt()
2134 /* add i2c over sideband */ in drm_dp_port_set_pdt()
2140 ret = -ENOMEM; in drm_dp_port_set_pdt()
2141 drm_err(mgr->dev, "Failed to create MSTB for port %p", port); in drm_dp_port_set_pdt()
2145 mutex_lock(&mgr->lock); in drm_dp_port_set_pdt()
2146 port->mstb = mstb; in drm_dp_port_set_pdt()
2147 mstb->mgr = port->mgr; in drm_dp_port_set_pdt()
2148 mstb->port_parent = port; in drm_dp_port_set_pdt()
2155 mutex_unlock(&mgr->lock); in drm_dp_port_set_pdt()
2164 port->pdt = DP_PEER_DEVICE_NONE; in drm_dp_port_set_pdt()
2169 * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
2187 return drm_dp_send_dpcd_read(port->mgr, port, in drm_dp_mst_dpcd_read()
2192 * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband
2210 return drm_dp_send_dpcd_write(port->mgr, port, in drm_dp_mst_dpcd_write()
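
Drivers normally reach these through the port's remote AUX channel: drm_dp_dpcd_read()/drm_dp_dpcd_write() on &port->aux dispatch to the sideband helpers above when aux->is_remote is set. A hedged sketch reading the remote sink's DPCD revision, assuming a valid struct drm_dp_mst_port *port:

	u8 dpcd_rev;
	ssize_t ret;

	ret = drm_dp_dpcd_read(&port->aux, DP_DPCD_REV, &dpcd_rev, 1);
	if (ret != 1)
		return ret < 0 ? ret : -EIO;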
2218 memcpy(mstb->guid, guid, 16); in drm_dp_check_mstb_guid()
2220 if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) { in drm_dp_check_mstb_guid()
2221 if (mstb->port_parent) { in drm_dp_check_mstb_guid()
2222 ret = drm_dp_send_dpcd_write(mstb->mgr, in drm_dp_check_mstb_guid()
2223 mstb->port_parent, in drm_dp_check_mstb_guid()
2224 DP_GUID, 16, mstb->guid); in drm_dp_check_mstb_guid()
2226 ret = drm_dp_dpcd_write(mstb->mgr->aux, in drm_dp_check_mstb_guid()
2227 DP_GUID, mstb->guid, 16); in drm_dp_check_mstb_guid()
2232 return -EPROTO; in drm_dp_check_mstb_guid()
2245 snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id); in build_mst_prop_path()
2246 for (i = 0; i < (mstb->lct - 1); i++) { in build_mst_prop_path()
2248 int port_num = (mstb->rad[i / 2] >> shift) & 0xf; in build_mst_prop_path()
2250 snprintf(temp, sizeof(temp), "-%d", port_num); in build_mst_prop_path()
2253 snprintf(temp, sizeof(temp), "-%d", pnum); in build_mst_prop_path()
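
So a sink two links deep — say port 8 of a branch hanging off port 1 of the primary branch, on a topology whose base connector id is 5 — would get a property path of the form "mst:5-1-8" (values illustrative).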
2258 * drm_dp_mst_connector_late_register() - Late MST connector registration
2271 drm_dbg_kms(port->mgr->dev, "registering %s remote bus for %s\n", in drm_dp_mst_connector_late_register()
2272 port->aux.name, connector->kdev->kobj.name); in drm_dp_mst_connector_late_register()
2274 port->aux.dev = connector->kdev; in drm_dp_mst_connector_late_register()
2275 return drm_dp_aux_register_devnode(&port->aux); in drm_dp_mst_connector_late_register()
2280 * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
2291 drm_dbg_kms(port->mgr->dev, "unregistering %s remote bus for %s\n", in drm_dp_mst_connector_early_unregister()
2292 port->aux.name, connector->kdev->kobj.name); in drm_dp_mst_connector_early_unregister()
2293 drm_dp_aux_unregister_devnode(&port->aux); in drm_dp_mst_connector_early_unregister()
2301 struct drm_dp_mst_topology_mgr *mgr = port->mgr; in drm_dp_mst_port_add_connector()
2305 build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath)); in drm_dp_mst_port_add_connector()
2306 port->connector = mgr->cbs->add_connector(mgr, port, proppath); in drm_dp_mst_port_add_connector()
2307 if (!port->connector) { in drm_dp_mst_port_add_connector()
2308 ret = -ENOMEM; in drm_dp_mst_port_add_connector()
2312 if (port->pdt != DP_PEER_DEVICE_NONE && in drm_dp_mst_port_add_connector()
2313 drm_dp_mst_is_end_device(port->pdt, port->mcs) && in drm_dp_mst_port_add_connector()
2314 port->port_num >= DP_MST_LOGICAL_PORT_0) in drm_dp_mst_port_add_connector()
2315 port->cached_edid = drm_get_edid(port->connector, in drm_dp_mst_port_add_connector()
2316 &port->aux.ddc); in drm_dp_mst_port_add_connector()
2318 drm_connector_register(port->connector); in drm_dp_mst_port_add_connector()
2322 drm_err(mgr->dev, "Failed to create connector for port %p: %d\n", port, ret); in drm_dp_mst_port_add_connector()
2326 * Drop a topology reference, and unlink the port from the in-memory topology
2333 mutex_lock(&mgr->lock); in drm_dp_mst_topology_unlink_port()
2334 port->parent->num_ports--; in drm_dp_mst_topology_unlink_port()
2335 list_del(&port->next); in drm_dp_mst_topology_unlink_port()
2336 mutex_unlock(&mgr->lock); in drm_dp_mst_topology_unlink_port()
2350 kref_init(&port->topology_kref); in drm_dp_mst_add_port()
2351 kref_init(&port->malloc_kref); in drm_dp_mst_add_port()
2352 port->parent = mstb; in drm_dp_mst_add_port()
2353 port->port_num = port_number; in drm_dp_mst_add_port()
2354 port->mgr = mgr; in drm_dp_mst_add_port()
2355 port->aux.name = "DPMST"; in drm_dp_mst_add_port()
2356 port->aux.dev = dev->dev; in drm_dp_mst_add_port()
2357 port->aux.is_remote = true; in drm_dp_mst_add_port()
2360 port->aux.drm_dev = dev; in drm_dp_mst_add_port()
2361 drm_dp_remote_aux_init(&port->aux); in drm_dp_mst_add_port()
2377 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; in drm_dp_mst_handle_link_address_port()
2384 port = drm_dp_get_port(mstb, port_msg->port_number); in drm_dp_mst_handle_link_address_port()
2387 port_msg->port_number); in drm_dp_mst_handle_link_address_port()
2389 return -ENOMEM; in drm_dp_mst_handle_link_address_port()
2392 } else if (!port->input && port_msg->input_port && port->connector) { in drm_dp_mst_handle_link_address_port()
2393 /* Since port->connector can't be changed here, we create a in drm_dp_mst_handle_link_address_port()
2399 port_msg->port_number); in drm_dp_mst_handle_link_address_port()
2401 return -ENOMEM; in drm_dp_mst_handle_link_address_port()
2404 } else if (port->input && !port_msg->input_port) { in drm_dp_mst_handle_link_address_port()
2406 } else if (port->connector) { in drm_dp_mst_handle_link_address_port()
2410 drm_modeset_lock(&mgr->base.lock, NULL); in drm_dp_mst_handle_link_address_port()
2412 old_ddps = port->ddps; in drm_dp_mst_handle_link_address_port()
2413 changed = port->ddps != port_msg->ddps || in drm_dp_mst_handle_link_address_port()
2414 (port->ddps && in drm_dp_mst_handle_link_address_port()
2415 (port->ldps != port_msg->legacy_device_plug_status || in drm_dp_mst_handle_link_address_port()
2416 port->dpcd_rev != port_msg->dpcd_revision || in drm_dp_mst_handle_link_address_port()
2417 port->mcs != port_msg->mcs || in drm_dp_mst_handle_link_address_port()
2418 port->pdt != port_msg->peer_device_type || in drm_dp_mst_handle_link_address_port()
2419 port->num_sdp_stream_sinks != in drm_dp_mst_handle_link_address_port()
2420 port_msg->num_sdp_stream_sinks)); in drm_dp_mst_handle_link_address_port()
2423 port->input = port_msg->input_port; in drm_dp_mst_handle_link_address_port()
2424 if (!port->input) in drm_dp_mst_handle_link_address_port()
2425 new_pdt = port_msg->peer_device_type; in drm_dp_mst_handle_link_address_port()
2426 new_mcs = port_msg->mcs; in drm_dp_mst_handle_link_address_port()
2427 port->ddps = port_msg->ddps; in drm_dp_mst_handle_link_address_port()
2428 port->ldps = port_msg->legacy_device_plug_status; in drm_dp_mst_handle_link_address_port()
2429 port->dpcd_rev = port_msg->dpcd_revision; in drm_dp_mst_handle_link_address_port()
2430 port->num_sdp_streams = port_msg->num_sdp_streams; in drm_dp_mst_handle_link_address_port()
2431 port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks; in drm_dp_mst_handle_link_address_port()
2433 /* manage mstb port lists with mgr lock - take a reference in drm_dp_mst_handle_link_address_port()
2436 mutex_lock(&mgr->lock); in drm_dp_mst_handle_link_address_port()
2438 list_add(&port->next, &mstb->ports); in drm_dp_mst_handle_link_address_port()
2439 mstb->num_ports++; in drm_dp_mst_handle_link_address_port()
2440 mutex_unlock(&mgr->lock); in drm_dp_mst_handle_link_address_port()
2444 * Reprobe PBN caps on both hotplug, and when re-probing the link in drm_dp_mst_handle_link_address_port()
2447 if (old_ddps != port->ddps || !created) { in drm_dp_mst_handle_link_address_port()
2448 if (port->ddps && !port->input) { in drm_dp_mst_handle_link_address_port()
2454 port->full_pbn = 0; in drm_dp_mst_handle_link_address_port()
2471 if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING && in drm_dp_mst_handle_link_address_port()
2472 port->mcs) in drm_dp_mst_handle_link_address_port()
2475 if (port->connector) in drm_dp_mst_handle_link_address_port()
2476 drm_modeset_unlock(&mgr->base.lock); in drm_dp_mst_handle_link_address_port()
2477 else if (!port->input) in drm_dp_mst_handle_link_address_port()
2480 if (send_link_addr && port->mstb) { in drm_dp_mst_handle_link_address_port()
2481 ret = drm_dp_send_link_address(mgr, port->mstb); in drm_dp_mst_handle_link_address_port()
2494 if (port->connector) in drm_dp_mst_handle_link_address_port()
2495 drm_modeset_unlock(&mgr->base.lock); in drm_dp_mst_handle_link_address_port()
2505 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; in drm_dp_mst_handle_conn_stat()
2512 port = drm_dp_get_port(mstb, conn_stat->port_number); in drm_dp_mst_handle_conn_stat()
2516 if (port->connector) { in drm_dp_mst_handle_conn_stat()
2517 if (!port->input && conn_stat->input_port) { in drm_dp_mst_handle_conn_stat()
2524 mstb->link_address_sent = false; in drm_dp_mst_handle_conn_stat()
2530 drm_modeset_lock(&mgr->base.lock, NULL); in drm_dp_mst_handle_conn_stat()
2531 } else if (port->input && !conn_stat->input_port) { in drm_dp_mst_handle_conn_stat()
2534 mstb->link_address_sent = false; in drm_dp_mst_handle_conn_stat()
2538 old_ddps = port->ddps; in drm_dp_mst_handle_conn_stat()
2539 port->input = conn_stat->input_port; in drm_dp_mst_handle_conn_stat()
2540 port->ldps = conn_stat->legacy_device_plug_status; in drm_dp_mst_handle_conn_stat()
2541 port->ddps = conn_stat->displayport_device_plug_status; in drm_dp_mst_handle_conn_stat()
2543 if (old_ddps != port->ddps) { in drm_dp_mst_handle_conn_stat()
2544 if (port->ddps && !port->input) in drm_dp_mst_handle_conn_stat()
2547 port->full_pbn = 0; in drm_dp_mst_handle_conn_stat()
2550 new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type; in drm_dp_mst_handle_conn_stat()
2551 new_mcs = conn_stat->message_capability_status; in drm_dp_mst_handle_conn_stat()
2556 drm_err(mgr->dev, "Failed to change PDT for port %p: %d\n", port, ret); in drm_dp_mst_handle_conn_stat()
2560 if (port->connector) in drm_dp_mst_handle_conn_stat()
2561 drm_modeset_unlock(&mgr->base.lock); in drm_dp_mst_handle_conn_stat()
2568 queue_work(system_long_wq, &mstb->mgr->work); in drm_dp_mst_handle_conn_stat()
2579 mutex_lock(&mgr->lock); in drm_dp_get_mst_branch_device()
2580 mstb = mgr->mst_primary; in drm_dp_get_mst_branch_device()
2585 for (i = 0; i < lct - 1; i++) { in drm_dp_get_mst_branch_device()
2589 list_for_each_entry(port, &mstb->ports, next) { in drm_dp_get_mst_branch_device()
2590 if (port->port_num == port_num) { in drm_dp_get_mst_branch_device()
2591 mstb = port->mstb; in drm_dp_get_mst_branch_device()
2593 drm_err(mgr->dev, in drm_dp_get_mst_branch_device()
2607 mutex_unlock(&mgr->lock); in drm_dp_get_mst_branch_device()
2618 if (memcmp(mstb->guid, guid, 16) == 0) in get_mst_branch_device_by_guid_helper()
2622 list_for_each_entry(port, &mstb->ports, next) { in get_mst_branch_device_by_guid_helper()
2623 if (!port->mstb) in get_mst_branch_device_by_guid_helper()
2626 found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid); in get_mst_branch_device_by_guid_helper()
2643 mutex_lock(&mgr->lock); in drm_dp_get_mst_branch_device_by_guid()
2645 mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid); in drm_dp_get_mst_branch_device_by_guid()
2652 mutex_unlock(&mgr->lock); in drm_dp_get_mst_branch_device_by_guid()
2663 if (!mstb->link_address_sent) { in drm_dp_check_and_send_link_address()
2671 list_for_each_entry(port, &mstb->ports, next) { in drm_dp_check_and_send_link_address()
2674 if (port->input || !port->ddps) in drm_dp_check_and_send_link_address()
2677 if (port->mstb) in drm_dp_check_and_send_link_address()
2679 mgr, port->mstb); in drm_dp_check_and_send_link_address()
2699 struct drm_device *dev = mgr->dev; in drm_dp_mst_link_probe_work()
2704 mutex_lock(&mgr->probe_lock); in drm_dp_mst_link_probe_work()
2706 mutex_lock(&mgr->lock); in drm_dp_mst_link_probe_work()
2707 clear_payload_id_table = !mgr->payload_id_table_cleared; in drm_dp_mst_link_probe_work()
2708 mgr->payload_id_table_cleared = true; in drm_dp_mst_link_probe_work()
2710 mstb = mgr->mst_primary; in drm_dp_mst_link_probe_work()
2716 mutex_unlock(&mgr->lock); in drm_dp_mst_link_probe_work()
2718 mutex_unlock(&mgr->probe_lock); in drm_dp_mst_link_probe_work()
2726 * drm_dp_mst_topology_mgr_set_mst(). Namely, the CableMatters USB-C in drm_dp_mst_link_probe_work()
2738 mutex_unlock(&mgr->probe_lock); in drm_dp_mst_link_probe_work()
2783 tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total); in drm_dp_send_sideband_msg()
2785 ret = drm_dp_dpcd_write(mgr->aux, regbase + offset, in drm_dp_send_sideband_msg()
2789 if (ret == -EIO && retries < 5) { in drm_dp_send_sideband_msg()
2793 drm_dbg_kms(mgr->dev, "failed to dpcd write %d %d\n", tosend, ret); in drm_dp_send_sideband_msg()
2795 return -EIO; in drm_dp_send_sideband_msg()
2798 total -= tosend; in drm_dp_send_sideband_msg()
2806 struct drm_dp_mst_branch *mstb = txmsg->dst; in set_hdr_from_dst_qlock()
2809 req_type = txmsg->msg[0] & 0x7f; in set_hdr_from_dst_qlock()
2813 hdr->broadcast = 1; in set_hdr_from_dst_qlock()
2815 hdr->broadcast = 0; in set_hdr_from_dst_qlock()
2816 hdr->path_msg = txmsg->path_msg; in set_hdr_from_dst_qlock()
2817 if (hdr->broadcast) { in set_hdr_from_dst_qlock()
2818 hdr->lct = 1; in set_hdr_from_dst_qlock()
2819 hdr->lcr = 6; in set_hdr_from_dst_qlock()
2821 hdr->lct = mstb->lct; in set_hdr_from_dst_qlock()
2822 hdr->lcr = mstb->lct - 1; in set_hdr_from_dst_qlock()
2825 memcpy(hdr->rad, mstb->rad, hdr->lct / 2); in set_hdr_from_dst_qlock()
2841 if (txmsg->state == DRM_DP_SIDEBAND_TX_SENT) in process_single_tx_qlock()
2846 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) in process_single_tx_qlock()
2847 txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND; in process_single_tx_qlock()
2855 len = txmsg->cur_len - txmsg->cur_offset; in process_single_tx_qlock()
2857 /* 48 = max sideband msg size, minus 1 byte for the data CRC and the header bytes */ in process_single_tx_qlock()
2858 space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr); in process_single_tx_qlock()
2861 if (len == txmsg->cur_len) in process_single_tx_qlock()
2869 memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend); in process_single_tx_qlock()
2885 txmsg->cur_offset += tosend; in process_single_tx_qlock()
2886 if (txmsg->cur_offset == txmsg->cur_len) { in process_single_tx_qlock()
2887 txmsg->state = DRM_DP_SIDEBAND_TX_SENT; in process_single_tx_qlock()
2898 WARN_ON(!mutex_is_locked(&mgr->qlock)); in process_single_down_tx_qlock()
2901 if (list_empty(&mgr->tx_msg_downq)) in process_single_down_tx_qlock()
2904 txmsg = list_first_entry(&mgr->tx_msg_downq, in process_single_down_tx_qlock()
2908 drm_dbg_kms(mgr->dev, "failed to send msg in q %d\n", ret); in process_single_down_tx_qlock()
2909 list_del(&txmsg->next); in process_single_down_tx_qlock()
2910 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT; in process_single_down_tx_qlock()
2911 wake_up_all(&mgr->tx_waitq); in process_single_down_tx_qlock()
2918 mutex_lock(&mgr->qlock); in drm_dp_queue_down_tx()
2919 list_add_tail(&txmsg->next, &mgr->tx_msg_downq); in drm_dp_queue_down_tx()
2927 if (list_is_singular(&mgr->tx_msg_downq)) in drm_dp_queue_down_tx()
2929 mutex_unlock(&mgr->qlock); in drm_dp_queue_down_tx()
2939 for (i = 0; i < reply->nports; i++) { in drm_dp_dump_link_address()
2940 port_reply = &reply->ports[i]; in drm_dp_dump_link_address()
2941 drm_dbg_kms(mgr->dev, in drm_dp_dump_link_address()
2944 port_reply->input_port, in drm_dp_dump_link_address()
2945 port_reply->peer_device_type, in drm_dp_dump_link_address()
2946 port_reply->port_number, in drm_dp_dump_link_address()
2947 port_reply->dpcd_revision, in drm_dp_dump_link_address()
2948 port_reply->mcs, in drm_dp_dump_link_address()
2949 port_reply->ddps, in drm_dp_dump_link_address()
2950 port_reply->legacy_device_plug_status, in drm_dp_dump_link_address()
2951 port_reply->num_sdp_streams, in drm_dp_dump_link_address()
2952 port_reply->num_sdp_stream_sinks); in drm_dp_dump_link_address()
2967 return -ENOMEM; in drm_dp_send_link_address()
2969 txmsg->dst = mstb; in drm_dp_send_link_address()
2972 mstb->link_address_sent = true; in drm_dp_send_link_address()
2978 drm_err(mgr->dev, "Sending link address failed with %d\n", ret); in drm_dp_send_link_address()
2981 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { in drm_dp_send_link_address()
2982 drm_err(mgr->dev, "link address NAK received\n"); in drm_dp_send_link_address()
2983 ret = -EIO; in drm_dp_send_link_address()
2987 reply = &txmsg->reply.u.link_addr; in drm_dp_send_link_address()
2988 drm_dbg_kms(mgr->dev, "link address reply: %d\n", reply->nports); in drm_dp_send_link_address()
2991 ret = drm_dp_check_mstb_guid(mstb, reply->guid); in drm_dp_send_link_address()
2995 drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, buf, sizeof(buf)); in drm_dp_send_link_address()
2996 drm_err(mgr->dev, "GUID check on %s failed: %d\n", buf, ret); in drm_dp_send_link_address()
3000 for (i = 0; i < reply->nports; i++) { in drm_dp_send_link_address()
3001 port_mask |= BIT(reply->ports[i].port_number); in drm_dp_send_link_address()
3002 ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev, in drm_dp_send_link_address()
3003 &reply->ports[i]); in drm_dp_send_link_address()
3010 /* Prune any ports that are currently a part of mstb in our in-memory in drm_dp_send_link_address()
3011 * topology, but were not seen in this link address. Usually this in drm_dp_send_link_address()
3012 * means that they were removed while the topology was out of sync, e.g. during suspend/resume. in drm_dp_send_link_address()
3015 mutex_lock(&mgr->lock); in drm_dp_send_link_address()
3016 list_for_each_entry_safe(port, tmp, &mstb->ports, next) { in drm_dp_send_link_address()
3017 if (port_mask & BIT(port->port_num)) in drm_dp_send_link_address()
3020 drm_dbg_kms(mgr->dev, "port %d was not in link address, removing\n", in drm_dp_send_link_address()
3021 port->port_num); in drm_dp_send_link_address()
3022 list_del(&port->next); in drm_dp_send_link_address()
3026 mutex_unlock(&mgr->lock); in drm_dp_send_link_address()
3030 mstb->link_address_sent = false; in drm_dp_send_link_address()
3046 txmsg->dst = mstb; in drm_dp_send_clear_payload_id_table()
3052 if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) in drm_dp_send_clear_payload_id_table()
3053 drm_dbg_kms(mgr->dev, "clear payload table id nak received\n"); in drm_dp_send_clear_payload_id_table()
3069 return -ENOMEM; in drm_dp_send_enum_path_resources()
3071 txmsg->dst = mstb; in drm_dp_send_enum_path_resources()
3072 build_enum_path_resources(txmsg, port->port_num); in drm_dp_send_enum_path_resources()
3079 path_res = &txmsg->reply.u.path_resources; in drm_dp_send_enum_path_resources()
3081 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { in drm_dp_send_enum_path_resources()
3082 drm_dbg_kms(mgr->dev, "enum path resources nak received\n"); in drm_dp_send_enum_path_resources()
3084 if (port->port_num != path_res->port_number) in drm_dp_send_enum_path_resources()
3087 drm_dbg_kms(mgr->dev, "enum path resources %d: %d %d\n", in drm_dp_send_enum_path_resources()
3088 path_res->port_number, in drm_dp_send_enum_path_resources()
3089 path_res->full_payload_bw_number, in drm_dp_send_enum_path_resources()
3090 path_res->avail_payload_bw_number); in drm_dp_send_enum_path_resources()
3096 if (port->full_pbn != path_res->full_payload_bw_number || in drm_dp_send_enum_path_resources()
3097 port->fec_capable != path_res->fec_capable) in drm_dp_send_enum_path_resources()
3100 port->full_pbn = path_res->full_payload_bw_number; in drm_dp_send_enum_path_resources()
3101 port->fec_capable = path_res->fec_capable; in drm_dp_send_enum_path_resources()
3111 if (!mstb->port_parent) in drm_dp_get_last_connected_port_to_mstb()
3114 if (mstb->port_parent->mstb != mstb) in drm_dp_get_last_connected_port_to_mstb()
3115 return mstb->port_parent; in drm_dp_get_last_connected_port_to_mstb()
3117 return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent); in drm_dp_get_last_connected_port_to_mstb()
3121 * Searches upwards in the topology starting from mstb to try to find the
3123 * topology. This can be used in order to perform operations like releasing
3136 mutex_lock(&mgr->lock); in drm_dp_get_last_connected_port_and_mstb()
3137 if (!mgr->mst_primary) in drm_dp_get_last_connected_port_and_mstb()
3145 if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) { in drm_dp_get_last_connected_port_and_mstb()
3146 rmstb = found_port->parent; in drm_dp_get_last_connected_port_and_mstb()
3147 *port_num = found_port->port_num; in drm_dp_get_last_connected_port_and_mstb()
3150 mstb = found_port->parent; in drm_dp_get_last_connected_port_and_mstb()
3154 mutex_unlock(&mgr->lock); in drm_dp_get_last_connected_port_and_mstb()
3169 port_num = port->port_num; in drm_dp_payload_send_msg()
3170 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); in drm_dp_payload_send_msg()
3173 port->parent, in drm_dp_payload_send_msg()
3177 return -EINVAL; in drm_dp_payload_send_msg()
3182 ret = -ENOMEM; in drm_dp_payload_send_msg()
3186 for (i = 0; i < port->num_sdp_streams; i++) in drm_dp_payload_send_msg()
3189 txmsg->dst = mstb; in drm_dp_payload_send_msg()
3192 pbn, port->num_sdp_streams, sinks); in drm_dp_payload_send_msg()
3199 * mstb could also be removed from the topology. In the future, this needs to be fixed by restarting the drm_dp_get_last_connected_port_and_mstb() search in the event of a in drm_dp_payload_send_msg()
3202 * timeout if the topology is still connected to the system. in drm_dp_payload_send_msg()
3206 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) in drm_dp_payload_send_msg()
3207 ret = -EINVAL; in drm_dp_payload_send_msg()
3225 return -EINVAL; in drm_dp_send_power_updown_phy()
3230 return -ENOMEM; in drm_dp_send_power_updown_phy()
3233 txmsg->dst = port->parent; in drm_dp_send_power_updown_phy()
3234 build_power_updown_phy(txmsg, port->port_num, power_up); in drm_dp_send_power_updown_phy()
3237 ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg); in drm_dp_send_power_updown_phy()
3239 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) in drm_dp_send_power_updown_phy()
3240 ret = -EINVAL; in drm_dp_send_power_updown_phy()
3261 return -ENOMEM; in drm_dp_send_query_stream_enc_status()
3265 ret = -EINVAL; in drm_dp_send_query_stream_enc_status()
3276 txmsg->dst = mgr->mst_primary; in drm_dp_send_query_stream_enc_status()
3278 build_query_stream_enc_status(txmsg, port->vcpi.vcpi, nonce); in drm_dp_send_query_stream_enc_status()
3282 ret = drm_dp_mst_wait_tx_reply(mgr->mst_primary, txmsg); in drm_dp_send_query_stream_enc_status()
3285 } else if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { in drm_dp_send_query_stream_enc_status()
3286 drm_dbg_kms(mgr->dev, "query encryption status nak received\n"); in drm_dp_send_query_stream_enc_status()
3287 ret = -ENXIO; in drm_dp_send_query_stream_enc_status()
3292 memcpy(status, &txmsg->reply.u.enc_status, sizeof(*status)); in drm_dp_send_query_stream_enc_status()
3310 payload->payload_state = 0; in drm_dp_create_payload_step1()
3313 payload->payload_state = DP_PAYLOAD_LOCAL; in drm_dp_create_payload_step1()
3324 ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn); in drm_dp_create_payload_step2()
3327 payload->payload_state = DP_PAYLOAD_REMOTE; in drm_dp_create_payload_step2()
3336 drm_dbg_kms(mgr->dev, "\n"); in drm_dp_destroy_payload_step1()
3343 payload->payload_state = DP_PAYLOAD_DELETE_LOCAL; in drm_dp_destroy_payload_step1()
3351 payload->payload_state = 0; in drm_dp_destroy_payload_step2()
3356 * drm_dp_update_payload_part1() - Execute payload update part 1
3360 * This iterates over all proposed virtual channels, and tries to allocate space in the link for them. For 0->slots transitions,
3361 * this step just writes the VCPI to the MST device. For slots->0 transitions, this writes the updated VCPIs and removes the remote VC payloads.
3376 mutex_lock(&mgr->payload_lock); in drm_dp_update_payload_part1()
3377 for (i = 0; i < mgr->max_payloads; i++) { in drm_dp_update_payload_part1()
3378 struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i]; in drm_dp_update_payload_part1()
3379 struct drm_dp_payload *payload = &mgr->payloads[i]; in drm_dp_update_payload_part1()
3382 /* solve the current payloads - compare to the hw ones in drm_dp_update_payload_part1()
3383 - update the hw view */ in drm_dp_update_payload_part1()
3389 mutex_lock(&mgr->lock); in drm_dp_update_payload_part1()
3390 skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary); in drm_dp_update_payload_part1()
3391 mutex_unlock(&mgr->lock); in drm_dp_update_payload_part1()
3394 drm_dbg_kms(mgr->dev, in drm_dp_update_payload_part1()
3395 "Virtual channel %d is not in current topology\n", in drm_dp_update_payload_part1()
3402 if (vcpi->num_slots) { in drm_dp_update_payload_part1()
3406 if (vcpi->num_slots == payload->num_slots) { in drm_dp_update_payload_part1()
3407 cur_slots += vcpi->num_slots; in drm_dp_update_payload_part1()
3408 payload->start_slot = req_payload.start_slot; in drm_dp_update_payload_part1()
3411 drm_dbg_kms(mgr->dev, in drm_dp_update_payload_part1()
3413 mutex_unlock(&mgr->payload_lock); in drm_dp_update_payload_part1()
3414 return -EINVAL; in drm_dp_update_payload_part1()
3420 req_payload.num_slots = vcpi->num_slots; in drm_dp_update_payload_part1()
3421 req_payload.vcpi = vcpi->vcpi; in drm_dp_update_payload_part1()
3427 payload->start_slot = req_payload.start_slot; in drm_dp_update_payload_part1()
3429 if (payload->num_slots != req_payload.num_slots) { in drm_dp_update_payload_part1()
3433 drm_dp_create_payload_step1(mgr, vcpi->vcpi, in drm_dp_update_payload_part1()
3435 payload->num_slots = req_payload.num_slots; in drm_dp_update_payload_part1()
3436 payload->vcpi = req_payload.vcpi; in drm_dp_update_payload_part1()
3438 } else if (payload->num_slots) { in drm_dp_update_payload_part1()
3439 payload->num_slots = 0; in drm_dp_update_payload_part1()
3441 payload->vcpi, in drm_dp_update_payload_part1()
3444 payload->payload_state; in drm_dp_update_payload_part1()
3445 payload->start_slot = 0; in drm_dp_update_payload_part1()
3447 payload->payload_state = req_payload.payload_state; in drm_dp_update_payload_part1()
3455 for (i = 0; i < mgr->max_payloads; /* do nothing */) { in drm_dp_update_payload_part1()
3456 if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) { in drm_dp_update_payload_part1()
3461 drm_dbg_kms(mgr->dev, "removing payload %d\n", i); in drm_dp_update_payload_part1()
3462 for (j = i; j < mgr->max_payloads - 1; j++) { in drm_dp_update_payload_part1()
3463 mgr->payloads[j] = mgr->payloads[j + 1]; in drm_dp_update_payload_part1()
3464 mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1]; in drm_dp_update_payload_part1()
3466 if (mgr->proposed_vcpis[j] && in drm_dp_update_payload_part1()
3467 mgr->proposed_vcpis[j]->num_slots) { in drm_dp_update_payload_part1()
3468 set_bit(j + 1, &mgr->payload_mask); in drm_dp_update_payload_part1()
3470 clear_bit(j + 1, &mgr->payload_mask); in drm_dp_update_payload_part1()
3474 memset(&mgr->payloads[mgr->max_payloads - 1], 0, in drm_dp_update_payload_part1()
3476 mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL; in drm_dp_update_payload_part1()
3477 clear_bit(mgr->max_payloads, &mgr->payload_mask); in drm_dp_update_payload_part1()
3479 mutex_unlock(&mgr->payload_lock); in drm_dp_update_payload_part1()
3486 * drm_dp_update_payload_part2() - Execute payload update part 2
3490 * This iterates over all proposed virtual channels, and tries to allocate space in the link for them. For 0->slots transitions,
3491 * this step writes the remote VC payload commands. For slots->0 transitions, this just resets some internal state.
3501 mutex_lock(&mgr->payload_lock); in drm_dp_update_payload_part2()
3502 for (i = 0; i < mgr->max_payloads; i++) { in drm_dp_update_payload_part2()
3504 if (!mgr->proposed_vcpis[i]) in drm_dp_update_payload_part2()
3507 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); in drm_dp_update_payload_part2()
3509 mutex_lock(&mgr->lock); in drm_dp_update_payload_part2()
3510 skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary); in drm_dp_update_payload_part2()
3511 mutex_unlock(&mgr->lock); in drm_dp_update_payload_part2()
3516 drm_dbg_kms(mgr->dev, "payload %d %d\n", i, mgr->payloads[i].payload_state); in drm_dp_update_payload_part2()
3517 if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) { in drm_dp_update_payload_part2()
3518 ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]); in drm_dp_update_payload_part2()
3519 } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) { in drm_dp_update_payload_part2()
3520 ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]); in drm_dp_update_payload_part2()
3523 mutex_unlock(&mgr->payload_lock); in drm_dp_update_payload_part2()
3527 mutex_unlock(&mgr->payload_lock); in drm_dp_update_payload_part2()
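/*
 * Editor's sketch, not part of the original file: the classic two-part
 * payload sequence as a non-atomic driver would issue it when enabling a
 * stream. Assumes the pre-start_slot signature of
 * drm_dp_update_payload_part1(); error handling is trimmed, and
 * example_enable_mst_stream() plus its arguments are hypothetical.
 */
static void example_enable_mst_stream(struct drm_dp_mst_topology_mgr *mgr,
				      struct drm_dp_mst_port *port,
				      int pbn, int slots)
{
	/* reserve a VCPI for this stream first */
	if (!drm_dp_mst_allocate_vcpi(mgr, port, pbn, slots))
		return;

	/* part 1: update the source-side VC payload table */
	drm_dp_update_payload_part1(mgr);

	/* ... the driver enables the transcoder/stream here ... */

	/* wait for the hub to signal ACT handled */
	drm_dp_check_act_status(mgr);

	/* part 2: send the ALLOCATE_PAYLOAD sideband messages downstream */
	drm_dp_update_payload_part2(mgr);
}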
3540 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); in drm_dp_send_dpcd_read()
3542 return -EINVAL; in drm_dp_send_dpcd_read()
3546 ret = -ENOMEM; in drm_dp_send_dpcd_read()
3550 build_dpcd_read(txmsg, port->port_num, offset, size); in drm_dp_send_dpcd_read()
3551 txmsg->dst = port->parent; in drm_dp_send_dpcd_read()
3560 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { in drm_dp_send_dpcd_read()
3561 drm_err(mgr->dev, "mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n", in drm_dp_send_dpcd_read()
3562 mstb, port->port_num, offset, size); in drm_dp_send_dpcd_read()
3563 ret = -EIO; in drm_dp_send_dpcd_read()
3567 if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) { in drm_dp_send_dpcd_read()
3568 ret = -EPROTO; in drm_dp_send_dpcd_read()
3572 ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes, in drm_dp_send_dpcd_read()
3574 memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret); in drm_dp_send_dpcd_read()
3592 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); in drm_dp_send_dpcd_write()
3594 return -EINVAL; in drm_dp_send_dpcd_write()
3598 ret = -ENOMEM; in drm_dp_send_dpcd_write()
3602 build_dpcd_write(txmsg, port->port_num, offset, size, bytes); in drm_dp_send_dpcd_write()
3603 txmsg->dst = mstb; in drm_dp_send_dpcd_write()
3609 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) in drm_dp_send_dpcd_write()
3610 ret = -EIO; in drm_dp_send_dpcd_write()
3639 return -ENOMEM; in drm_dp_send_up_ack_reply()
3641 txmsg->dst = mstb; in drm_dp_send_up_ack_reply()
3644 mutex_lock(&mgr->qlock); in drm_dp_send_up_ack_reply()
3647 mutex_unlock(&mgr->qlock); in drm_dp_send_up_ack_reply()
3654 * drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link
3668 drm_dbg_kms(mgr->dev, "invalid link rate/lane count: (%d / %d)\n", in drm_dp_get_vc_payload_bw()
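/*
 * Editor's note, not part of the original file: a worked example of the
 * payload bandwidth math. For an HBR2 (5.4 Gbps) x4 link,
 * drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4) yields 540000, so one MTP
 * timeslot carries 540000 * 4 / 54000 = 40 PBN, i.e. mgr->pbn_div == 40.
 * example_pbn_div_hbr2_x4() is hypothetical.
 */
static int example_pbn_div_hbr2_x4(struct drm_dp_mst_topology_mgr *mgr)
{
	int link_rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4);

	return drm_dp_get_vc_payload_bw(mgr, link_rate, 4); /* 40 */
}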
3677 * drm_dp_read_mst_cap() - check whether or not a sink supports MST
3699 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
3701 * @mst_state: true to enable MST on this connector - false to disable.
3711 mutex_lock(&mgr->payload_lock); in drm_dp_mst_topology_mgr_set_mst()
3712 mutex_lock(&mgr->lock); in drm_dp_mst_topology_mgr_set_mst()
3713 if (mst_state == mgr->mst_state) in drm_dp_mst_topology_mgr_set_mst()
3716 mgr->mst_state = mst_state; in drm_dp_mst_topology_mgr_set_mst()
3723 WARN_ON(mgr->mst_primary); in drm_dp_mst_topology_mgr_set_mst()
3726 ret = drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd); in drm_dp_mst_topology_mgr_set_mst()
3728 drm_dbg_kms(mgr->dev, "%s: failed to read DPCD, ret %d\n", in drm_dp_mst_topology_mgr_set_mst()
3729 mgr->aux->name, ret); in drm_dp_mst_topology_mgr_set_mst()
3733 lane_count = min_t(int, mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK, mgr->max_lane_count); in drm_dp_mst_topology_mgr_set_mst()
3734 link_rate = min_t(int, drm_dp_bw_code_to_link_rate(mgr->dpcd[1]), mgr->max_link_rate); in drm_dp_mst_topology_mgr_set_mst()
3735 mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr, in drm_dp_mst_topology_mgr_set_mst()
3738 if (mgr->pbn_div == 0) { in drm_dp_mst_topology_mgr_set_mst()
3739 ret = -EINVAL; in drm_dp_mst_topology_mgr_set_mst()
3746 ret = -ENOMEM; in drm_dp_mst_topology_mgr_set_mst()
3749 mstb->mgr = mgr; in drm_dp_mst_topology_mgr_set_mst()
3752 mgr->mst_primary = mstb; in drm_dp_mst_topology_mgr_set_mst()
3753 drm_dp_mst_topology_get_mstb(mgr->mst_primary); in drm_dp_mst_topology_mgr_set_mst()
3755 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, in drm_dp_mst_topology_mgr_set_mst()
3766 queue_work(system_long_wq, &mgr->work); in drm_dp_mst_topology_mgr_set_mst()
3771 mstb = mgr->mst_primary; in drm_dp_mst_topology_mgr_set_mst()
3772 mgr->mst_primary = NULL; in drm_dp_mst_topology_mgr_set_mst()
3774 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0); in drm_dp_mst_topology_mgr_set_mst()
3776 memset(mgr->payloads, 0, in drm_dp_mst_topology_mgr_set_mst()
3777 mgr->max_payloads * sizeof(mgr->payloads[0])); in drm_dp_mst_topology_mgr_set_mst()
3778 memset(mgr->proposed_vcpis, 0, in drm_dp_mst_topology_mgr_set_mst()
3779 mgr->max_payloads * sizeof(mgr->proposed_vcpis[0])); in drm_dp_mst_topology_mgr_set_mst()
3780 mgr->payload_mask = 0; in drm_dp_mst_topology_mgr_set_mst()
3781 set_bit(0, &mgr->payload_mask); in drm_dp_mst_topology_mgr_set_mst()
3782 mgr->vcpi_mask = 0; in drm_dp_mst_topology_mgr_set_mst()
3783 mgr->payload_id_table_cleared = false; in drm_dp_mst_topology_mgr_set_mst()
3787 mutex_unlock(&mgr->lock); in drm_dp_mst_topology_mgr_set_mst()
3788 mutex_unlock(&mgr->payload_lock); in drm_dp_mst_topology_mgr_set_mst()
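/*
 * Editor's sketch, not part of the original file: typical long-HPD handling.
 * The driver probes the sink's MST capability and flips the topology manager
 * on or off to match; example_detect_mst() is hypothetical.
 */
static void example_detect_mst(struct drm_dp_mst_topology_mgr *mgr,
			       struct drm_dp_aux *aux)
{
	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	bool mst = false;

	if (!drm_dp_read_dpcd_caps(aux, dpcd))
		mst = drm_dp_read_mst_cap(aux, dpcd);

	drm_dp_mst_topology_mgr_set_mst(mgr, mst);
}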
3801 /* The link address will need to be re-sent on resume */ in drm_dp_mst_topology_mgr_invalidate_mstb()
3802 mstb->link_address_sent = false; in drm_dp_mst_topology_mgr_invalidate_mstb()
3804 list_for_each_entry(port, &mstb->ports, next) in drm_dp_mst_topology_mgr_invalidate_mstb()
3805 if (port->mstb) in drm_dp_mst_topology_mgr_invalidate_mstb()
3806 drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb); in drm_dp_mst_topology_mgr_invalidate_mstb()
3810 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
3818 mutex_lock(&mgr->lock); in drm_dp_mst_topology_mgr_suspend()
3819 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, in drm_dp_mst_topology_mgr_suspend()
3821 mutex_unlock(&mgr->lock); in drm_dp_mst_topology_mgr_suspend()
3822 flush_work(&mgr->up_req_work); in drm_dp_mst_topology_mgr_suspend()
3823 flush_work(&mgr->work); in drm_dp_mst_topology_mgr_suspend()
3824 flush_work(&mgr->delayed_destroy_work); in drm_dp_mst_topology_mgr_suspend()
3826 mutex_lock(&mgr->lock); in drm_dp_mst_topology_mgr_suspend()
3827 if (mgr->mst_state && mgr->mst_primary) in drm_dp_mst_topology_mgr_suspend()
3828 drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary); in drm_dp_mst_topology_mgr_suspend()
3829 mutex_unlock(&mgr->lock); in drm_dp_mst_topology_mgr_suspend()
3834 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
3836 * @sync: whether or not to perform topology reprobing synchronously
3841 * If the device fails this returns -1, and the driver should do a full MST reprobe, in case we were undocked.
3850 * Returns: -1 if the MST topology was removed while we were suspended, 0 otherwise.
3859 mutex_lock(&mgr->lock); in drm_dp_mst_topology_mgr_resume()
3860 if (!mgr->mst_primary) in drm_dp_mst_topology_mgr_resume()
3863 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, in drm_dp_mst_topology_mgr_resume()
3866 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); in drm_dp_mst_topology_mgr_resume()
3870 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, in drm_dp_mst_topology_mgr_resume()
3875 drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n"); in drm_dp_mst_topology_mgr_resume()
3880 ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16); in drm_dp_mst_topology_mgr_resume()
3882 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); in drm_dp_mst_topology_mgr_resume()
3886 ret = drm_dp_check_mstb_guid(mgr->mst_primary, guid); in drm_dp_mst_topology_mgr_resume()
3888 drm_dbg_kms(mgr->dev, "check mstb failed - undocked during suspend?\n"); in drm_dp_mst_topology_mgr_resume()
3893 * For the final step of resuming the topology, we need to bring the in drm_dp_mst_topology_mgr_resume()
3894 * state of our in-memory topology back into sync with reality. So, restart the probing process as if we're probing all new devices. in drm_dp_mst_topology_mgr_resume()
3897 queue_work(system_long_wq, &mgr->work); in drm_dp_mst_topology_mgr_resume()
3898 mutex_unlock(&mgr->lock); in drm_dp_mst_topology_mgr_resume()
3901 drm_dbg_kms(mgr->dev, in drm_dp_mst_topology_mgr_resume()
3902 "Waiting for link probe work to finish re-syncing topology...\n"); in drm_dp_mst_topology_mgr_resume()
3903 flush_work(&mgr->work); in drm_dp_mst_topology_mgr_resume()
3909 mutex_unlock(&mgr->lock); in drm_dp_mst_topology_mgr_resume()
3910 return -1; in drm_dp_mst_topology_mgr_resume()
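/*
 * Editor's sketch, not part of the original file: how the suspend/resume
 * helpers pair up in a driver's system PM path. On resume failure the driver
 * is expected to tear MST down and reprobe from scratch;
 * example_mst_system_pm() is hypothetical.
 */
static void example_mst_system_pm(struct drm_dp_mst_topology_mgr *mgr)
{
	/* on system suspend */
	drm_dp_mst_topology_mgr_suspend(mgr);

	/* on system resume, before drm_atomic_helper_resume() */
	if (drm_dp_mst_topology_mgr_resume(mgr, true) < 0)
		drm_dp_mst_topology_mgr_set_mst(mgr, false); /* undocked */
}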
3925 up ? &mgr->up_req_recv : &mgr->down_rep_recv; in drm_dp_get_one_sb_msg()
3932 len = min(mgr->max_dpcd_transaction_bytes, 16); in drm_dp_get_one_sb_msg()
3933 ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len); in drm_dp_get_one_sb_msg()
3935 drm_dbg_kms(mgr->dev, "failed to read DPCD down rep %d %d\n", len, ret); in drm_dp_get_one_sb_msg()
3943 drm_dbg_kms(mgr->dev, "ERROR: failed header\n"); in drm_dp_get_one_sb_msg()
3951 drm_dbg_kms(mgr->dev, "Got MST reply from unknown device %d\n", hdr.lct); in drm_dp_get_one_sb_msg()
3957 drm_dbg_kms(mgr->dev, "sideband msg set header failed %d\n", replyblock[0]); in drm_dp_get_one_sb_msg()
3961 replylen = min(msg->curchunk_len, (u8)(len - hdrlen)); in drm_dp_get_one_sb_msg()
3964 drm_dbg_kms(mgr->dev, "sideband msg build failed %d\n", replyblock[0]); in drm_dp_get_one_sb_msg()
3968 replylen = msg->curchunk_len + msg->curchunk_hdrlen - len; in drm_dp_get_one_sb_msg()
3971 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16); in drm_dp_get_one_sb_msg()
3972 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply, in drm_dp_get_one_sb_msg()
3975 drm_dbg_kms(mgr->dev, "failed to read a chunk (len %d, ret %d)\n", in drm_dp_get_one_sb_msg()
3982 drm_dbg_kms(mgr->dev, "failed to build sideband msg\n"); in drm_dp_get_one_sb_msg()
3987 replylen -= len; in drm_dp_get_one_sb_msg()
3996 struct drm_dp_sideband_msg_rx *msg = &mgr->down_rep_recv; in drm_dp_mst_handle_down_rep()
4001 /* Multi-packet message transmission, don't clear the reply */ in drm_dp_mst_handle_down_rep()
4002 if (!msg->have_eomt) in drm_dp_mst_handle_down_rep()
4006 mutex_lock(&mgr->qlock); in drm_dp_mst_handle_down_rep()
4007 txmsg = list_first_entry_or_null(&mgr->tx_msg_downq, in drm_dp_mst_handle_down_rep()
4009 mutex_unlock(&mgr->qlock); in drm_dp_mst_handle_down_rep()
4012 if (!txmsg || txmsg->dst != mstb) { in drm_dp_mst_handle_down_rep()
4015 hdr = &msg->initial_hdr; in drm_dp_mst_handle_down_rep()
4016 drm_dbg_kms(mgr->dev, "Got MST reply with no msg %p %d %d %02x %02x\n", in drm_dp_mst_handle_down_rep()
4017 mstb, hdr->seqno, hdr->lct, hdr->rad[0], msg->msg[0]); in drm_dp_mst_handle_down_rep()
4021 drm_dp_sideband_parse_reply(mgr, msg, &txmsg->reply); in drm_dp_mst_handle_down_rep()
4023 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { in drm_dp_mst_handle_down_rep()
4024 drm_dbg_kms(mgr->dev, in drm_dp_mst_handle_down_rep()
4026 txmsg->reply.req_type, in drm_dp_mst_handle_down_rep()
4027 drm_dp_mst_req_type_str(txmsg->reply.req_type), in drm_dp_mst_handle_down_rep()
4028 txmsg->reply.u.nak.reason, in drm_dp_mst_handle_down_rep()
4029 drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason), in drm_dp_mst_handle_down_rep()
4030 txmsg->reply.u.nak.nak_data); in drm_dp_mst_handle_down_rep()
4036 mutex_lock(&mgr->qlock); in drm_dp_mst_handle_down_rep()
4037 txmsg->state = DRM_DP_SIDEBAND_TX_RX; in drm_dp_mst_handle_down_rep()
4038 list_del(&txmsg->next); in drm_dp_mst_handle_down_rep()
4039 mutex_unlock(&mgr->qlock); in drm_dp_mst_handle_down_rep()
4041 wake_up_all(&mgr->tx_waitq); in drm_dp_mst_handle_down_rep()
4059 struct drm_dp_sideband_msg_req_body *msg = &up_req->msg; in drm_dp_mst_process_up_req()
4060 struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr; in drm_dp_mst_process_up_req()
4063 if (hdr->broadcast) { in drm_dp_mst_process_up_req()
4066 if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) in drm_dp_mst_process_up_req()
4067 guid = msg->u.conn_stat.guid; in drm_dp_mst_process_up_req()
4068 else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY) in drm_dp_mst_process_up_req()
4069 guid = msg->u.resource_stat.guid; in drm_dp_mst_process_up_req()
4074 mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad); in drm_dp_mst_process_up_req()
4078 drm_dbg_kms(mgr->dev, "Got MST reply from unknown device %d\n", hdr->lct); in drm_dp_mst_process_up_req()
4083 if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) { in drm_dp_mst_process_up_req()
4084 drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat); in drm_dp_mst_process_up_req()
4100 mutex_lock(&mgr->probe_lock); in drm_dp_mst_up_req_work()
4102 mutex_lock(&mgr->up_req_lock); in drm_dp_mst_up_req_work()
4103 up_req = list_first_entry_or_null(&mgr->up_req_list, in drm_dp_mst_up_req_work()
4107 list_del(&up_req->next); in drm_dp_mst_up_req_work()
4108 mutex_unlock(&mgr->up_req_lock); in drm_dp_mst_up_req_work()
4116 mutex_unlock(&mgr->probe_lock); in drm_dp_mst_up_req_work()
4119 drm_kms_helper_hotplug_event(mgr->dev); in drm_dp_mst_up_req_work()
4129 if (!mgr->up_req_recv.have_eomt) in drm_dp_mst_handle_up_req()
4134 return -ENOMEM; in drm_dp_mst_handle_up_req()
4136 INIT_LIST_HEAD(&up_req->next); in drm_dp_mst_handle_up_req()
4138 drm_dp_sideband_parse_req(mgr, &mgr->up_req_recv, &up_req->msg); in drm_dp_mst_handle_up_req()
4140 if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY && in drm_dp_mst_handle_up_req()
4141 up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) { in drm_dp_mst_handle_up_req()
4142 drm_dbg_kms(mgr->dev, "Received unknown up req type, ignoring: %x\n", in drm_dp_mst_handle_up_req()
4143 up_req->msg.req_type); in drm_dp_mst_handle_up_req()
4148 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type, in drm_dp_mst_handle_up_req()
4151 if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { in drm_dp_mst_handle_up_req()
4153 &up_req->msg.u.conn_stat; in drm_dp_mst_handle_up_req()
4155 drm_dbg_kms(mgr->dev, "Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", in drm_dp_mst_handle_up_req()
4156 conn_stat->port_number, in drm_dp_mst_handle_up_req()
4157 conn_stat->legacy_device_plug_status, in drm_dp_mst_handle_up_req()
4158 conn_stat->displayport_device_plug_status, in drm_dp_mst_handle_up_req()
4159 conn_stat->message_capability_status, in drm_dp_mst_handle_up_req()
4160 conn_stat->input_port, in drm_dp_mst_handle_up_req()
4161 conn_stat->peer_device_type); in drm_dp_mst_handle_up_req()
4162 } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { in drm_dp_mst_handle_up_req()
4164 &up_req->msg.u.resource_stat; in drm_dp_mst_handle_up_req()
4166 drm_dbg_kms(mgr->dev, "Got RSN: pn: %d avail_pbn %d\n", in drm_dp_mst_handle_up_req()
4167 res_stat->port_number, in drm_dp_mst_handle_up_req()
4168 res_stat->available_pbn); in drm_dp_mst_handle_up_req()
4171 up_req->hdr = mgr->up_req_recv.initial_hdr; in drm_dp_mst_handle_up_req()
4172 mutex_lock(&mgr->up_req_lock); in drm_dp_mst_handle_up_req()
4173 list_add_tail(&up_req->next, &mgr->up_req_list); in drm_dp_mst_handle_up_req()
4174 mutex_unlock(&mgr->up_req_lock); in drm_dp_mst_handle_up_req()
4175 queue_work(system_long_wq, &mgr->up_req_work); in drm_dp_mst_handle_up_req()
4178 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); in drm_dp_mst_handle_up_req()
4183 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
4190 * topology manager will process the sideband messages received as a result of this.
4200 if (sc != mgr->sink_count) { in drm_dp_mst_hpd_irq()
4201 mgr->sink_count = sc; in drm_dp_mst_hpd_irq()
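/*
 * Editor's sketch, not part of the original file: the short-pulse IRQ path.
 * The driver reads the ESI vector itself, hands it to the topology manager,
 * and acks whatever was serviced. example_short_hpd() is hypothetical; real
 * drivers loop until no events remain.
 */
static void example_short_hpd(struct drm_dp_mst_topology_mgr *mgr)
{
	u8 esi[4] = {};
	bool handled = false;

	if (drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI, esi, 4) != 4)
		return;

	drm_dp_mst_hpd_irq(mgr, esi, &handled);

	if (handled) /* ack the serviced events back to the sink */
		drm_dp_dpcd_write(mgr->aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
}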
4221 * drm_dp_mst_detect_port() - get connection status for an MST port
4242 ret = drm_modeset_lock(&mgr->base.lock, ctx); in drm_dp_mst_detect_port()
4248 if (!port->ddps) in drm_dp_mst_detect_port()
4251 switch (port->pdt) { in drm_dp_mst_detect_port()
4255 if (!port->mcs) in drm_dp_mst_detect_port()
4261 /* for logical ports - cache the EDID */ in drm_dp_mst_detect_port()
4262 if (port->port_num >= DP_MST_LOGICAL_PORT_0 && !port->cached_edid) in drm_dp_mst_detect_port()
4263 port->cached_edid = drm_get_edid(connector, &port->aux.ddc); in drm_dp_mst_detect_port()
4266 if (port->ldps) in drm_dp_mst_detect_port()
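/*
 * Editor's sketch, not part of the original file: an MST connector's
 * .detect_ctx hook simply defers to the topology manager. The
 * example_mst_connector wrapper is hypothetical.
 */
struct example_mst_connector {
	struct drm_connector base;
	struct drm_dp_mst_topology_mgr *mgr;
	struct drm_dp_mst_port *port;
};

static int example_detect_ctx(struct drm_connector *connector,
			      struct drm_modeset_acquire_ctx *ctx, bool force)
{
	struct example_mst_connector *c =
		container_of(connector, struct example_mst_connector, base);

	return drm_dp_mst_detect_port(connector, ctx, c->mgr, c->port);
}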
4277 * drm_dp_mst_get_edid() - get EDID for an MST port
4295 if (port->cached_edid) in drm_dp_mst_get_edid()
4296 edid = drm_edid_duplicate(port->cached_edid); in drm_dp_mst_get_edid()
4298 edid = drm_get_edid(connector, &port->aux.ddc); in drm_dp_mst_get_edid()
4300 port->has_audio = drm_detect_monitor_audio(edid); in drm_dp_mst_get_edid()
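/*
 * Editor's sketch, not part of the original file: a matching .get_modes hook
 * built on drm_dp_mst_get_edid(), reusing the hypothetical
 * example_mst_connector wrapper from the previous sketch.
 */
static int example_get_modes(struct drm_connector *connector)
{
	struct example_mst_connector *c =
		container_of(connector, struct example_mst_connector, base);
	struct edid *edid;
	int count;

	edid = drm_dp_mst_get_edid(connector, c->mgr, c->port);
	drm_connector_update_edid_property(connector, edid);
	count = drm_add_edid_modes(connector, edid);
	kfree(edid);

	return count;
}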
4307 * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
4323 num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div); in drm_dp_find_vcpi_slots()
4325 /* max. time slots - one slot for MTP header */ in drm_dp_find_vcpi_slots()
4327 return -ENOSPC; in drm_dp_find_vcpi_slots()
4337 /* max. time slots - one slot for MTP header */ in drm_dp_init_vcpi()
4339 return -ENOSPC; in drm_dp_init_vcpi()
4341 vcpi->pbn = pbn; in drm_dp_init_vcpi()
4342 vcpi->aligned_pbn = slots * mgr->pbn_div; in drm_dp_init_vcpi()
4343 vcpi->num_slots = slots; in drm_dp_init_vcpi()
4352 * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
4354 * @mgr: MST topology manager for the port
4396 list_for_each_entry(pos, &topology_state->vcpis, next) { in drm_dp_atomic_find_vcpi_slots()
4397 if (pos->port == port) { in drm_dp_atomic_find_vcpi_slots()
4399 prev_slots = vcpi->vcpi; in drm_dp_atomic_find_vcpi_slots()
4400 prev_bw = vcpi->pbn; in drm_dp_atomic_find_vcpi_slots()
4408 drm_err(mgr->dev, in drm_dp_atomic_find_vcpi_slots()
4411 return -EINVAL; in drm_dp_atomic_find_vcpi_slots()
4423 pbn_div = mgr->pbn_div; in drm_dp_atomic_find_vcpi_slots()
4427 drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n", in drm_dp_atomic_find_vcpi_slots()
4428 port->connector->base.id, port->connector->name, in drm_dp_atomic_find_vcpi_slots()
4430 drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n", in drm_dp_atomic_find_vcpi_slots()
4431 port->connector->base.id, port->connector->name, in drm_dp_atomic_find_vcpi_slots()
4438 return -ENOMEM; in drm_dp_atomic_find_vcpi_slots()
4441 vcpi->port = port; in drm_dp_atomic_find_vcpi_slots()
4442 list_add(&vcpi->next, &topology_state->vcpis); in drm_dp_atomic_find_vcpi_slots()
4444 vcpi->vcpi = req_slots; in drm_dp_atomic_find_vcpi_slots()
4445 vcpi->pbn = pbn; in drm_dp_atomic_find_vcpi_slots()
4452 * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
4454 * @mgr: MST topology manager for the port
4489 list_for_each_entry(pos, &topology_state->vcpis, next) { in drm_dp_atomic_release_vcpi_slots()
4490 if (pos->port == port) { in drm_dp_atomic_release_vcpi_slots()
4496 drm_err(mgr->dev, "no VCPI for [MST PORT:%p] found in mst state %p\n", in drm_dp_atomic_release_vcpi_slots()
4497 port, &topology_state->base); in drm_dp_atomic_release_vcpi_slots()
4498 return -EINVAL; in drm_dp_atomic_release_vcpi_slots()
4501 drm_dbg_atomic(mgr->dev, "[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi); in drm_dp_atomic_release_vcpi_slots()
4502 if (pos->vcpi) { in drm_dp_atomic_release_vcpi_slots()
4504 pos->vcpi = 0; in drm_dp_atomic_release_vcpi_slots()
4505 pos->pbn = 0; in drm_dp_atomic_release_vcpi_slots()
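/*
 * Editor's sketch, not part of the original file: the atomic VCPI helpers as
 * an encoder's ->atomic_check() would use them - request slots while the
 * stream is active, release them when it is torn down. Passing pbn_div == 0
 * falls back to mgr->pbn_div; example_encoder_atomic_check() is hypothetical.
 */
static int example_encoder_atomic_check(struct drm_atomic_state *state,
					struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_mst_port *port,
					struct drm_crtc_state *crtc_state,
					int pbn)
{
	int slots;

	if (!crtc_state->active)
		return drm_dp_atomic_release_vcpi_slots(state, mgr, port);

	slots = drm_dp_atomic_find_vcpi_slots(state, mgr, port, pbn, 0);

	return slots < 0 ? slots : 0;
}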
4513 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
4531 if (port->vcpi.vcpi > 0) { in drm_dp_mst_allocate_vcpi()
4532 drm_dbg_kms(mgr->dev, in drm_dp_mst_allocate_vcpi()
4533 "payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", in drm_dp_mst_allocate_vcpi()
4534 port->vcpi.vcpi, port->vcpi.pbn, pbn); in drm_dp_mst_allocate_vcpi()
4535 if (pbn == port->vcpi.pbn) { in drm_dp_mst_allocate_vcpi()
4541 ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots); in drm_dp_mst_allocate_vcpi()
4543 drm_dbg_kms(mgr->dev, "failed to init vcpi slots=%d max=63 ret=%d\n", in drm_dp_mst_allocate_vcpi()
4544 DIV_ROUND_UP(pbn, mgr->pbn_div), ret); in drm_dp_mst_allocate_vcpi()
4548 drm_dbg_kms(mgr->dev, "initing vcpi for pbn=%d slots=%d\n", pbn, port->vcpi.num_slots); in drm_dp_mst_allocate_vcpi()
4567 slots = port->vcpi.num_slots; in drm_dp_mst_get_vcpi_slots()
4574 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
4587 port->vcpi.num_slots = 0; in drm_dp_mst_reset_vcpi_slots()
4592 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
4604 if (!port->vcpi.vcpi) in drm_dp_mst_deallocate_vcpi()
4607 mutex_lock(&mgr->lock); in drm_dp_mst_deallocate_vcpi()
4608 skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary); in drm_dp_mst_deallocate_vcpi()
4609 mutex_unlock(&mgr->lock); in drm_dp_mst_deallocate_vcpi()
4614 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); in drm_dp_mst_deallocate_vcpi()
4615 port->vcpi.num_slots = 0; in drm_dp_mst_deallocate_vcpi()
4616 port->vcpi.pbn = 0; in drm_dp_mst_deallocate_vcpi()
4617 port->vcpi.aligned_pbn = 0; in drm_dp_mst_deallocate_vcpi()
4618 port->vcpi.vcpi = 0; in drm_dp_mst_deallocate_vcpi()
4630 drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, in drm_dp_dpcd_write_payload()
4634 payload_alloc[1] = payload->start_slot; in drm_dp_dpcd_write_payload()
4635 payload_alloc[2] = payload->num_slots; in drm_dp_dpcd_write_payload()
4637 ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3); in drm_dp_dpcd_write_payload()
4639 drm_dbg_kms(mgr->dev, "failed to write payload allocation %d\n", ret); in drm_dp_dpcd_write_payload()
4644 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); in drm_dp_dpcd_write_payload()
4646 drm_dbg_kms(mgr->dev, "failed to read payload table status %d\n", ret); in drm_dp_dpcd_write_payload()
4656 drm_dbg_kms(mgr->dev, "status not set after read payload table status %d\n", in drm_dp_dpcd_write_payload()
4658 ret = -EINVAL; in drm_dp_dpcd_write_payload()
4679 * drm_dp_check_act_status() - Polls for ACT handled status.
4683 * polling for the ACT handled bit for up to 3 seconds (yes, some hubs really are that slow).
4700 ret = readx_poll_timeout(do_get_act_status, mgr->aux, status, in drm_dp_check_act_status()
4704 drm_err(mgr->dev, "Failed to get ACT after %dms, last status: %02x\n", in drm_dp_check_act_status()
4706 return -EINVAL; in drm_dp_check_act_status()
4709 * Failure here isn't unexpected - the hub may have just been unplugged. in drm_dp_check_act_status()
4712 drm_dbg_kms(mgr->dev, "Failed to read payload table status: %d\n", status); in drm_dp_check_act_status()
4721 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
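/*
 * Editor's note, not part of the original file: roughly, the PBN value works
 * out to the stream's clock_kHz * bpp / 8 bytes/s expressed in units of
 * 54/64 MBps with a 0.6% margin, i.e.
 *   PBN = ceil(clock * bpp * 64 * 1006 / (8 * 54 * 10^6)).
 * For example, 1080p60 (148500 kHz) at 24 bpp needs about 532 PBN.
 */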
4757 queue_work(system_long_wq, &mgr->tx_work); in drm_dp_mst_kick_tx()
4786 int tabs = mstb->lct; in drm_dp_mst_dump_mstb()
4794 seq_printf(m, "%smstb - [%p]: num_ports: %d\n", prefix, mstb, mstb->num_ports); in drm_dp_mst_dump_mstb()
4795 list_for_each_entry(port, &mstb->ports, next) { in drm_dp_mst_dump_mstb()
4796 seq_printf(m, "%sport %d - [%p] (%s - %s): ddps: %d, ldps: %d, sdp: %d/%d, fec: %s, conn: %p\n", in drm_dp_mst_dump_mstb()
4798 port->port_num, in drm_dp_mst_dump_mstb()
4800 port->input ? "input" : "output", in drm_dp_mst_dump_mstb()
4801 pdt_to_string(port->pdt), in drm_dp_mst_dump_mstb()
4802 port->ddps, in drm_dp_mst_dump_mstb()
4803 port->ldps, in drm_dp_mst_dump_mstb()
4804 port->num_sdp_streams, in drm_dp_mst_dump_mstb()
4805 port->num_sdp_stream_sinks, in drm_dp_mst_dump_mstb()
4806 port->fec_capable ? "true" : "false", in drm_dp_mst_dump_mstb()
4807 port->connector); in drm_dp_mst_dump_mstb()
4808 if (port->mstb) in drm_dp_mst_dump_mstb()
4809 drm_dp_mst_dump_mstb(m, port->mstb); in drm_dp_mst_dump_mstb()
4821 if (drm_dp_dpcd_read(mgr->aux, in dump_dp_payload_table()
4835 mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port); in fetch_monitor_name()
4840 * drm_dp_mst_dump_topology() - dump topology to seq file.
4842 * @mgr: manager to dump current topology for.
4844 * helper to dump MST topology to a seq file for debugfs.
4852 mutex_lock(&mgr->lock); in drm_dp_mst_dump_topology()
4853 if (mgr->mst_primary) in drm_dp_mst_dump_topology()
4854 drm_dp_mst_dump_mstb(m, mgr->mst_primary); in drm_dp_mst_dump_topology()
4857 mutex_unlock(&mgr->lock); in drm_dp_mst_dump_topology()
4859 mutex_lock(&mgr->payload_lock); in drm_dp_mst_dump_topology()
seq_printf(m, "payload_mask: %lx, vcpi_mask: %lx, max_payloads: %d\n", mgr->payload_mask, mgr->vcpi_mask, mgr->max_payloads); in drm_dp_mst_dump_topology()
4864 for (i = 0; i < mgr->max_payloads; i++) { in drm_dp_mst_dump_topology()
4865 if (mgr->proposed_vcpis[i]) { in drm_dp_mst_dump_topology()
4868 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); in drm_dp_mst_dump_topology()
4872 port->port_num, in drm_dp_mst_dump_topology()
4873 port->vcpi.vcpi, in drm_dp_mst_dump_topology()
4874 port->vcpi.num_slots, in drm_dp_mst_dump_topology()
4877 seq_printf(m, "%6d - Unused\n", i); in drm_dp_mst_dump_topology()
4881 for (i = 0; i < mgr->max_payloads; i++) { in drm_dp_mst_dump_topology()
4884 mgr->payloads[i].payload_state, in drm_dp_mst_dump_topology()
4885 mgr->payloads[i].start_slot, in drm_dp_mst_dump_topology()
4886 mgr->payloads[i].num_slots); in drm_dp_mst_dump_topology()
4888 mutex_unlock(&mgr->payload_lock); in drm_dp_mst_dump_topology()
4891 mutex_lock(&mgr->lock); in drm_dp_mst_dump_topology()
4892 if (mgr->mst_primary) { in drm_dp_mst_dump_topology()
4896 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE); in drm_dp_mst_dump_topology()
4903 ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2); in drm_dp_mst_dump_topology()
4910 ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1); in drm_dp_mst_dump_topology()
4918 ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE); in drm_dp_mst_dump_topology()
4934 mutex_unlock(&mgr->lock); in drm_dp_mst_dump_topology()
4943 mutex_lock(&mgr->qlock); in drm_dp_tx_work()
4944 if (!list_empty(&mgr->tx_msg_downq)) in drm_dp_tx_work()
4946 mutex_unlock(&mgr->qlock); in drm_dp_tx_work()
4952 drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs); in drm_dp_delayed_destroy_port()
4954 if (port->connector) { in drm_dp_delayed_destroy_port()
4955 drm_connector_unregister(port->connector); in drm_dp_delayed_destroy_port()
4956 drm_connector_put(port->connector); in drm_dp_delayed_destroy_port()
4965 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; in drm_dp_delayed_destroy_mstb()
4970 mutex_lock(&mgr->lock); in drm_dp_delayed_destroy_mstb()
4971 list_for_each_entry_safe(port, port_tmp, &mstb->ports, next) { in drm_dp_delayed_destroy_mstb()
4972 list_del(&port->next); in drm_dp_delayed_destroy_mstb()
4975 mutex_unlock(&mgr->lock); in drm_dp_delayed_destroy_mstb()
4978 mutex_lock(&mstb->mgr->qlock); in drm_dp_delayed_destroy_mstb()
4979 list_for_each_entry_safe(txmsg, txmsg_tmp, &mgr->tx_msg_downq, next) { in drm_dp_delayed_destroy_mstb()
4980 if (txmsg->dst != mstb) in drm_dp_delayed_destroy_mstb()
4983 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT; in drm_dp_delayed_destroy_mstb()
4984 list_del(&txmsg->next); in drm_dp_delayed_destroy_mstb()
4987 mutex_unlock(&mstb->mgr->qlock); in drm_dp_delayed_destroy_mstb()
4990 wake_up_all(&mstb->mgr->tx_waitq); in drm_dp_delayed_destroy_mstb()
5004 * connector lock before destroying the mstb/port, to avoid AB->BA ordering between this lock and the config mutex. in drm_dp_delayed_destroy_work()
5013 mutex_lock(&mgr->delayed_destroy_lock); in drm_dp_delayed_destroy_work()
5014 mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list, in drm_dp_delayed_destroy_work()
5018 list_del(&mstb->destroy_next); in drm_dp_delayed_destroy_work()
5019 mutex_unlock(&mgr->delayed_destroy_lock); in drm_dp_delayed_destroy_work()
5031 mutex_lock(&mgr->delayed_destroy_lock); in drm_dp_delayed_destroy_work()
5032 port = list_first_entry_or_null(&mgr->destroy_port_list, in drm_dp_delayed_destroy_work()
5036 list_del(&port->next); in drm_dp_delayed_destroy_work()
5037 mutex_unlock(&mgr->delayed_destroy_lock); in drm_dp_delayed_destroy_work()
5049 drm_kms_helper_hotplug_event(mgr->dev); in drm_dp_delayed_destroy_work()
5056 to_dp_mst_topology_state(obj->state); in drm_dp_mst_duplicate_state()
5063 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); in drm_dp_mst_duplicate_state()
5065 INIT_LIST_HEAD(&state->vcpis); in drm_dp_mst_duplicate_state()
5067 list_for_each_entry(pos, &old_state->vcpis, next) { in drm_dp_mst_duplicate_state()
5069 if (!pos->vcpi) in drm_dp_mst_duplicate_state()
5076 drm_dp_mst_get_port_malloc(vcpi->port); in drm_dp_mst_duplicate_state()
5077 list_add(&vcpi->next, &state->vcpis); in drm_dp_mst_duplicate_state()
5080 return &state->base; in drm_dp_mst_duplicate_state()
5083 list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) { in drm_dp_mst_duplicate_state()
5084 drm_dp_mst_put_port_malloc(pos->port); in drm_dp_mst_duplicate_state()
5099 list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) { in drm_dp_mst_destroy_state()
5100 /* We only keep references to ports with non-zero VCPIs */ in drm_dp_mst_destroy_state()
5101 if (pos->vcpi) in drm_dp_mst_destroy_state()
5102 drm_dp_mst_put_port_malloc(pos->port); in drm_dp_mst_destroy_state()
5112 while (port->parent) { in drm_dp_mst_port_downstream_of_branch()
5113 if (port->parent == branch) in drm_dp_mst_port_downstream_of_branch()
5116 if (port->parent->port_parent) in drm_dp_mst_port_downstream_of_branch()
5117 port = port->parent->port_parent; in drm_dp_mst_port_downstream_of_branch()
5140 list_for_each_entry(vcpi, &state->vcpis, next) { in drm_dp_mst_atomic_check_mstb_bw_limit()
5141 if (!vcpi->pbn || in drm_dp_mst_atomic_check_mstb_bw_limit()
5142 !drm_dp_mst_port_downstream_of_branch(vcpi->port, mstb)) in drm_dp_mst_atomic_check_mstb_bw_limit()
5151 if (mstb->port_parent) in drm_dp_mst_atomic_check_mstb_bw_limit()
5152 drm_dbg_atomic(mstb->mgr->dev, in drm_dp_mst_atomic_check_mstb_bw_limit()
5154 mstb->port_parent->parent, mstb->port_parent, mstb); in drm_dp_mst_atomic_check_mstb_bw_limit()
5156 drm_dbg_atomic(mstb->mgr->dev, "[MSTB:%p] Checking bandwidth limits\n", mstb); in drm_dp_mst_atomic_check_mstb_bw_limit()
5158 list_for_each_entry(port, &mstb->ports, next) { in drm_dp_mst_atomic_check_mstb_bw_limit()
5176 if (port->pdt == DP_PEER_DEVICE_NONE) in drm_dp_mst_atomic_check_port_bw_limit()
5179 if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) { in drm_dp_mst_atomic_check_port_bw_limit()
5182 list_for_each_entry(vcpi, &state->vcpis, next) { in drm_dp_mst_atomic_check_port_bw_limit()
5183 if (vcpi->port != port) in drm_dp_mst_atomic_check_port_bw_limit()
5185 if (!vcpi->pbn) in drm_dp_mst_atomic_check_port_bw_limit()
5198 if (!port->full_pbn) { in drm_dp_mst_atomic_check_port_bw_limit()
5199 drm_dbg_atomic(port->mgr->dev, in drm_dp_mst_atomic_check_port_bw_limit()
5201 port->parent, port); in drm_dp_mst_atomic_check_port_bw_limit()
5202 return -EINVAL; in drm_dp_mst_atomic_check_port_bw_limit()
5205 pbn_used = vcpi->pbn; in drm_dp_mst_atomic_check_port_bw_limit()
5207 pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb, in drm_dp_mst_atomic_check_port_bw_limit()
5213 if (pbn_used > port->full_pbn) { in drm_dp_mst_atomic_check_port_bw_limit()
5214 drm_dbg_atomic(port->mgr->dev, in drm_dp_mst_atomic_check_port_bw_limit()
5216 port->parent, port, pbn_used, port->full_pbn); in drm_dp_mst_atomic_check_port_bw_limit()
5217 return -ENOSPC; in drm_dp_mst_atomic_check_port_bw_limit()
5220 drm_dbg_atomic(port->mgr->dev, "[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n", in drm_dp_mst_atomic_check_port_bw_limit()
5221 port->parent, port, pbn_used, port->full_pbn); in drm_dp_mst_atomic_check_port_bw_limit()
5233 list_for_each_entry(vcpi, &mst_state->vcpis, next) { in drm_dp_mst_atomic_check_vcpi_alloc_limit()
5234 /* Releasing VCPI is always OK - even if the port is gone */ in drm_dp_mst_atomic_check_vcpi_alloc_limit()
5235 if (!vcpi->vcpi) { in drm_dp_mst_atomic_check_vcpi_alloc_limit()
5236 drm_dbg_atomic(mgr->dev, "[MST PORT:%p] releases all VCPI slots\n", in drm_dp_mst_atomic_check_vcpi_alloc_limit()
5237 vcpi->port); in drm_dp_mst_atomic_check_vcpi_alloc_limit()
5241 drm_dbg_atomic(mgr->dev, "[MST PORT:%p] requires %d vcpi slots\n", in drm_dp_mst_atomic_check_vcpi_alloc_limit()
5242 vcpi->port, vcpi->vcpi); in drm_dp_mst_atomic_check_vcpi_alloc_limit()
5244 avail_slots -= vcpi->vcpi; in drm_dp_mst_atomic_check_vcpi_alloc_limit()
5246 drm_dbg_atomic(mgr->dev, in drm_dp_mst_atomic_check_vcpi_alloc_limit()
5248 vcpi->port, mst_state, avail_slots + vcpi->vcpi); in drm_dp_mst_atomic_check_vcpi_alloc_limit()
5249 return -ENOSPC; in drm_dp_mst_atomic_check_vcpi_alloc_limit()
5252 if (++payload_count > mgr->max_payloads) { in drm_dp_mst_atomic_check_vcpi_alloc_limit()
5253 drm_dbg_atomic(mgr->dev, in drm_dp_mst_atomic_check_vcpi_alloc_limit()
5255 mgr, mst_state, mgr->max_payloads); in drm_dp_mst_atomic_check_vcpi_alloc_limit()
5256 return -EINVAL; in drm_dp_mst_atomic_check_vcpi_alloc_limit()
5259 drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n", in drm_dp_mst_atomic_check_vcpi_alloc_limit()
5260 mgr, mst_state, avail_slots, 63 - avail_slots); in drm_dp_mst_atomic_check_vcpi_alloc_limit()
5268 * @mgr: MST topology manager
5270 * Whenever there is a change in MST topology, the DSC configuration may have to be recalculated, so we need to trigger a modeset on all affected
5273 * CRTCs in that topology.
5290 return -EINVAL; in drm_dp_mst_add_affected_dsc_crtcs()
5292 list_for_each_entry(pos, &mst_state->vcpis, next) { in drm_dp_mst_add_affected_dsc_crtcs()
5294 connector = pos->port->connector; in drm_dp_mst_add_affected_dsc_crtcs()
5297 return -EINVAL; in drm_dp_mst_add_affected_dsc_crtcs()
5304 crtc = conn_state->crtc; in drm_dp_mst_add_affected_dsc_crtcs()
5309 if (!drm_dp_mst_dsc_aux_for_port(pos->port)) in drm_dp_mst_add_affected_dsc_crtcs()
5312 crtc_state = drm_atomic_get_crtc_state(mst_state->base.state, crtc); in drm_dp_mst_add_affected_dsc_crtcs()
5317 drm_dbg_atomic(mgr->dev, "[MST MGR:%p] Setting mode_changed flag on CRTC %p\n", in drm_dp_mst_add_affected_dsc_crtcs()
5320 crtc_state->mode_changed = true; in drm_dp_mst_add_affected_dsc_crtcs()
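/*
 * Editor's sketch, not part of the original file: a driver that recomputes
 * DSC would call the helper above from its global atomic check, so that
 * every CRTC sharing the topology undergoes a full modeset;
 * example_dsc_recompute() is hypothetical.
 */
static int example_dsc_recompute(struct drm_atomic_state *state,
				 struct drm_dp_mst_topology_mgr *mgr)
{
	int ret = drm_dp_mst_add_affected_dsc_crtcs(state, mgr);

	if (ret)
		return ret;

	/* ... recompute DSC parameters for the affected streams ... */
	return 0;
}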
5327 * drm_dp_mst_atomic_enable_dsc - Set DSC Enable Flag to On/Off
5350 mst_state = drm_atomic_get_mst_topology_state(state, port->mgr); in drm_dp_mst_atomic_enable_dsc()
5355 list_for_each_entry(pos, &mst_state->vcpis, next) { in drm_dp_mst_atomic_enable_dsc()
5356 if (pos->port == port) { in drm_dp_mst_atomic_enable_dsc()
5363 drm_dbg_atomic(state->dev, in drm_dp_mst_atomic_enable_dsc()
5366 return -EINVAL; in drm_dp_mst_atomic_enable_dsc()
5369 if (pos->dsc_enabled == enable) { in drm_dp_mst_atomic_enable_dsc()
5370 drm_dbg_atomic(state->dev, in drm_dp_mst_atomic_enable_dsc()
5372 port, enable, pos->vcpi); in drm_dp_mst_atomic_enable_dsc()
5373 vcpi = pos->vcpi; in drm_dp_mst_atomic_enable_dsc()
5377 vcpi = drm_dp_atomic_find_vcpi_slots(state, port->mgr, port, pbn, pbn_div); in drm_dp_mst_atomic_enable_dsc()
5378 drm_dbg_atomic(state->dev, in drm_dp_mst_atomic_enable_dsc()
5382 return -EINVAL; in drm_dp_mst_atomic_enable_dsc()
5385 pos->dsc_enabled = enable; in drm_dp_mst_atomic_enable_dsc()
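/*
 * Editor's sketch, not part of the original file: flipping DSC on for a port
 * from atomic check. The PBN passed in must already reflect the compressed
 * bitrate, and pbn_div == 0 falls back to mgr->pbn_div; example_set_dsc()
 * and compressed_pbn are hypothetical.
 */
static int example_set_dsc(struct drm_atomic_state *state,
			   struct drm_dp_mst_port *port, int compressed_pbn)
{
	return drm_dp_mst_atomic_enable_dsc(state, port, compressed_pbn,
					    0, true);
}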
5391 * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an atomic update is valid
5395 * Checks the given topology state for an atomic update to ensure that it's valid, including whether there is enough bandwidth to support the new VCPI allocations.
5418 if (!mgr->mst_state) in drm_dp_mst_atomic_check()
5425 mutex_lock(&mgr->lock); in drm_dp_mst_atomic_check()
5426 ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary, in drm_dp_mst_atomic_check()
5428 mutex_unlock(&mgr->lock); in drm_dp_mst_atomic_check()
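/*
 * Editor's sketch, not part of the original file: wiring the MST check into
 * a driver's top-level atomic validation; example_atomic_check() is
 * hypothetical.
 */
static int example_atomic_check(struct drm_device *dev,
				struct drm_atomic_state *state)
{
	int ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	/* validates VCPI slot counts and per-branch bandwidth limits */
	return drm_dp_mst_atomic_check(state);
}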
5446 * drm_atomic_get_mst_topology_state: get MST topology state
5449 * @mgr: MST topology manager, also the private object in this case
5453 * topology object. Also, drm_atomic_get_private_obj_state() expects the caller to take care of the locking, so we warn if the connection mutex is not held.
5458 * The MST topology state or error pointer.
5463 return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base)); in drm_atomic_get_mst_topology_state()
5468 * drm_dp_mst_topology_mgr_init - initialise a topology manager
5470 * @dev: device providing this structure - for i2c addition.
5488 mutex_init(&mgr->lock); in drm_dp_mst_topology_mgr_init()
5489 mutex_init(&mgr->qlock); in drm_dp_mst_topology_mgr_init()
5490 mutex_init(&mgr->payload_lock); in drm_dp_mst_topology_mgr_init()
5491 mutex_init(&mgr->delayed_destroy_lock); in drm_dp_mst_topology_mgr_init()
5492 mutex_init(&mgr->up_req_lock); in drm_dp_mst_topology_mgr_init()
5493 mutex_init(&mgr->probe_lock); in drm_dp_mst_topology_mgr_init()
5495 mutex_init(&mgr->topology_ref_history_lock); in drm_dp_mst_topology_mgr_init()
5497 INIT_LIST_HEAD(&mgr->tx_msg_downq); in drm_dp_mst_topology_mgr_init()
5498 INIT_LIST_HEAD(&mgr->destroy_port_list); in drm_dp_mst_topology_mgr_init()
5499 INIT_LIST_HEAD(&mgr->destroy_branch_device_list); in drm_dp_mst_topology_mgr_init()
5500 INIT_LIST_HEAD(&mgr->up_req_list); in drm_dp_mst_topology_mgr_init()
5504 * requeuing will also be flushed when deinitializing the topology manager. in drm_dp_mst_topology_mgr_init()
5506 mgr->delayed_destroy_wq = alloc_ordered_workqueue("drm_dp_mst_wq", 0); in drm_dp_mst_topology_mgr_init()
5507 if (mgr->delayed_destroy_wq == NULL) in drm_dp_mst_topology_mgr_init()
5508 return -ENOMEM; in drm_dp_mst_topology_mgr_init()
5510 INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work); in drm_dp_mst_topology_mgr_init()
5511 INIT_WORK(&mgr->tx_work, drm_dp_tx_work); in drm_dp_mst_topology_mgr_init()
5512 INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work); in drm_dp_mst_topology_mgr_init()
5513 INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work); in drm_dp_mst_topology_mgr_init()
5514 init_waitqueue_head(&mgr->tx_waitq); in drm_dp_mst_topology_mgr_init()
5515 mgr->dev = dev; in drm_dp_mst_topology_mgr_init()
5516 mgr->aux = aux; in drm_dp_mst_topology_mgr_init()
5517 mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes; in drm_dp_mst_topology_mgr_init()
5518 mgr->max_payloads = max_payloads; in drm_dp_mst_topology_mgr_init()
5519 mgr->max_lane_count = max_lane_count; in drm_dp_mst_topology_mgr_init()
5520 mgr->max_link_rate = max_link_rate; in drm_dp_mst_topology_mgr_init()
5521 mgr->conn_base_id = conn_base_id; in drm_dp_mst_topology_mgr_init()
5522 if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 || in drm_dp_mst_topology_mgr_init()
5523 max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8) in drm_dp_mst_topology_mgr_init()
5524 return -EINVAL; in drm_dp_mst_topology_mgr_init()
5525 mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL); in drm_dp_mst_topology_mgr_init()
5526 if (!mgr->payloads) in drm_dp_mst_topology_mgr_init()
5527 return -ENOMEM; in drm_dp_mst_topology_mgr_init()
5528 mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL); in drm_dp_mst_topology_mgr_init()
5529 if (!mgr->proposed_vcpis) in drm_dp_mst_topology_mgr_init()
5530 return -ENOMEM; in drm_dp_mst_topology_mgr_init()
5531 set_bit(0, &mgr->payload_mask); in drm_dp_mst_topology_mgr_init()
5535 return -ENOMEM; in drm_dp_mst_topology_mgr_init()
5537 mst_state->mgr = mgr; in drm_dp_mst_topology_mgr_init()
5538 INIT_LIST_HEAD(&mst_state->vcpis); in drm_dp_mst_topology_mgr_init()
5540 drm_atomic_private_obj_init(dev, &mgr->base, in drm_dp_mst_topology_mgr_init()
5541 &mst_state->base, in drm_dp_mst_topology_mgr_init()
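/*
 * Editor's sketch, not part of the original file: one manager is initialized
 * per MST-capable connector at driver load. The transaction size, payload
 * count and link limits below are illustrative values only;
 * example_init_mst() is hypothetical.
 */
static int example_init_mst(struct drm_dp_mst_topology_mgr *mgr,
			    struct drm_device *dev, struct drm_dp_aux *aux,
			    int conn_base_id)
{
	return drm_dp_mst_topology_mgr_init(mgr, dev, aux,
					    16 /* max_dpcd_transaction_bytes */,
					    6  /* max_payloads */,
					    4  /* max_lane_count */,
					    drm_dp_bw_code_to_link_rate(DP_LINK_BW_8_1),
					    conn_base_id);
}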
5549 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
5555 flush_work(&mgr->work); in drm_dp_mst_topology_mgr_destroy()
5557 if (mgr->delayed_destroy_wq) { in drm_dp_mst_topology_mgr_destroy()
5558 destroy_workqueue(mgr->delayed_destroy_wq); in drm_dp_mst_topology_mgr_destroy()
5559 mgr->delayed_destroy_wq = NULL; in drm_dp_mst_topology_mgr_destroy()
5561 mutex_lock(&mgr->payload_lock); in drm_dp_mst_topology_mgr_destroy()
5562 kfree(mgr->payloads); in drm_dp_mst_topology_mgr_destroy()
5563 mgr->payloads = NULL; in drm_dp_mst_topology_mgr_destroy()
5564 kfree(mgr->proposed_vcpis); in drm_dp_mst_topology_mgr_destroy()
5565 mgr->proposed_vcpis = NULL; in drm_dp_mst_topology_mgr_destroy()
5566 mutex_unlock(&mgr->payload_lock); in drm_dp_mst_topology_mgr_destroy()
5567 mgr->dev = NULL; in drm_dp_mst_topology_mgr_destroy()
5568 mgr->aux = NULL; in drm_dp_mst_topology_mgr_destroy()
5569 drm_atomic_private_obj_fini(&mgr->base); in drm_dp_mst_topology_mgr_destroy()
5570 mgr->funcs = NULL; in drm_dp_mst_topology_mgr_destroy()
5572 mutex_destroy(&mgr->delayed_destroy_lock); in drm_dp_mst_topology_mgr_destroy()
5573 mutex_destroy(&mgr->payload_lock); in drm_dp_mst_topology_mgr_destroy()
5574 mutex_destroy(&mgr->qlock); in drm_dp_mst_topology_mgr_destroy()
5575 mutex_destroy(&mgr->lock); in drm_dp_mst_topology_mgr_destroy()
5576 mutex_destroy(&mgr->up_req_lock); in drm_dp_mst_topology_mgr_destroy()
5577 mutex_destroy(&mgr->probe_lock); in drm_dp_mst_topology_mgr_destroy()
5579 mutex_destroy(&mgr->topology_ref_history_lock); in drm_dp_mst_topology_mgr_destroy()
5588 if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS) in remote_i2c_read_ok()
5591 for (i = 0; i < num - 1; i++) { in remote_i2c_read_ok()
5597 return msgs[num - 1].flags & I2C_M_RD && in remote_i2c_read_ok()
5598 msgs[num - 1].len <= 0xff; in remote_i2c_read_ok()
5605 for (i = 0; i < num - 1; i++) { in remote_i2c_write_ok()
5611 return !(msgs[num - 1].flags & I2C_M_RD) && msgs[num - 1].len <= 0xff; in remote_i2c_write_ok()
static int drm_dp_mst_i2c_read(struct drm_dp_mst_branch *mstb,
			       struct drm_dp_mst_port *port,
			       struct i2c_msg *msgs, int num)
{
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	unsigned int i;
	struct drm_dp_sideband_msg_req_body msg;
	struct drm_dp_sideband_msg_tx *txmsg = NULL;
	int ret;

	memset(&msg, 0, sizeof(msg));
	msg.req_type = DP_REMOTE_I2C_READ;
	msg.u.i2c_read.num_transactions = num - 1;
	msg.u.i2c_read.port_number = port->port_num;
	for (i = 0; i < num - 1; i++) {
		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
		msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
	}
	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto out;
	}

	txmsg->dst = mstb;
	drm_dp_encode_sideband_req(&msg, txmsg);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
			ret = -EREMOTEIO;
			goto out;
		}
		if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
			ret = -EIO;
			goto out;
		}
		memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
		ret = num;
	}
out:
	kfree(txmsg);
	return ret;
}
static int drm_dp_mst_i2c_write(struct drm_dp_mst_branch *mstb,
				struct drm_dp_mst_port *port,
				struct i2c_msg *msgs, int num)
{
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	unsigned int i;
	struct drm_dp_sideband_msg_req_body msg;
	struct drm_dp_sideband_msg_tx *txmsg = NULL;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto out;
	}
	for (i = 0; i < num; i++) {
		memset(&msg, 0, sizeof(msg));
		msg.req_type = DP_REMOTE_I2C_WRITE;
		msg.u.i2c_write.port_number = port->port_num;
		msg.u.i2c_write.write_i2c_device_id = msgs[i].addr;
		msg.u.i2c_write.num_bytes = msgs[i].len;
		msg.u.i2c_write.bytes = msgs[i].buf;

		memset(txmsg, 0, sizeof(*txmsg));
		txmsg->dst = mstb;

		drm_dp_encode_sideband_req(&msg, txmsg);
		drm_dp_queue_down_tx(mgr, txmsg);

		ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
		if (ret > 0) {
			if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
				ret = -EREMOTEIO;
				goto out;
			}
		} else {
			goto out;
		}
	}
	ret = num;
out:
	kfree(txmsg);
	return ret;
}
/* I2C device */
static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter,
			       struct i2c_msg *msgs, int num)
{
	struct drm_dp_aux *aux = adapter->algo_data;
	struct drm_dp_mst_port *port =
		container_of(aux, struct drm_dp_mst_port, aux);
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	int ret;

	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
	if (!mstb)
		return -EREMOTEIO;

	if (remote_i2c_read_ok(msgs, num)) {
		ret = drm_dp_mst_i2c_read(mstb, port, msgs, num);
	} else if (remote_i2c_write_ok(msgs, num)) {
		ret = drm_dp_mst_i2c_write(mstb, port, msgs, num);
	} else {
		drm_dbg_kms(mgr->dev, "Unsupported I2C transaction for MST device\n");
		ret = -EIO;
	}

	drm_dp_mst_topology_put_mstb(mstb);
	return ret;
}

static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
	       I2C_FUNC_10BIT_ADDR;
}

static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
	.functionality = drm_dp_mst_i2c_functionality,
	.master_xfer = drm_dp_mst_i2c_xfer,
};
/**
 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
 * @port: The port to add the I2C bus on
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port)
{
	struct drm_dp_aux *aux = &port->aux;
	struct device *parent_dev = port->mgr->dev->dev;

	aux->ddc.algo = &drm_dp_mst_i2c_algo;
	aux->ddc.algo_data = aux;
	aux->ddc.retries = 3;

	aux->ddc.class = I2C_CLASS_DDC;
	aux->ddc.owner = THIS_MODULE;
	/* FIXME: set the kdev of the port's connector as parent */
	aux->ddc.dev.parent = parent_dev;
	aux->ddc.dev.of_node = parent_dev->of_node;

	strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(parent_dev),
		sizeof(aux->ddc.name));

	return i2c_add_adapter(&aux->ddc);
}
/**
 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
 * @port: The port to remove the I2C bus from
 */
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port)
{
	i2c_del_adapter(&port->aux.ddc);
}
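/*
 * Once registered, the adapter behaves like any DDC bus. A minimal sketch
 * (illustrative; assumes a connector already bound to the port) fetching
 * EDID with the core helper:
 */
static struct edid *my_get_mst_edid(struct drm_connector *connector,
				    struct drm_dp_mst_port *port)
{
	return drm_get_edid(connector, &port->aux.ddc);
}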
/**
 * drm_dp_mst_is_virtual_dpcd() - Is the given port a virtual DP Peer Device
 * @port: The port to check
 *
 * A single physical MST hub object can be represented in the topology
 * by multiple branches, with virtual ports between those branches.
 *
 * As of DP1.4, An MST hub with internal (virtual) ports must expose
 * certain DPCD registers over those ports. See sections 2.6.1.1.1
 * and 2.6.1.1.2 of Display Port specification v1.4 for details.
 *
 * May acquire mgr->lock
 *
 * Returns:
 * true if the port is a virtual DP peer device, false otherwise
 */
static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *downstream_port;

	if (!port || port->dpcd_rev < DP_DPCD_REV_14)
		return false;

	/* Virtual DP Sink (Internal Display Panel) */
	if (port->port_num >= 8)
		return true;

	/* DP-to-HDMI Protocol Converter */
	if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV &&
	    !port->mcs &&
	    port->ldps)
		return true;

	/* DP-to-DP */
	mutex_lock(&port->mgr->lock);
	if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
	    port->mstb &&
	    port->mstb->num_ports == 2) {
		list_for_each_entry(downstream_port, &port->mstb->ports, next) {
			if (downstream_port->pdt == DP_PEER_DEVICE_SST_SINK &&
			    !downstream_port->input) {
				mutex_unlock(&port->mgr->lock);
				return true;
			}
		}
	}
	mutex_unlock(&port->mgr->lock);

	return false;
}
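/*
 * Illustration (added for clarity, mirroring the DP-to-DP test above): a
 * virtual DP peer device shows up as an internal branch with exactly two
 * ports, one of which is a non-input SST sink:
 *
 *   mstb (physical hub)
 *    `-- port (DP_PEER_DEVICE_MST_BRANCHING)    <- virtual DP peer device
 *         `-- mstb (internal branch, num_ports == 2)
 *              |-- port (input)
 *              `-- port (DP_PEER_DEVICE_SST_SINK, !input)
 */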
/**
 * drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
 * @port: The port to check. A leaf of the MST tree with an attached display.
 *
 * Depending on the situation, DSC may be enabled via the endpoint aux,
 * the immediately upstream aux, or the connector's physical aux.
 *
 * This is both the correct aux to read DSC_CAPABILITY and the
 * correct aux to write DSC_ENABLED.
 *
 * This operation can be expensive (up to four aux reads), so
 * the caller should cache the return.
 *
 * Returns:
 * NULL if DSC cannot be enabled on this port, otherwise the aux device
 */
struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *immediate_upstream_port;
	struct drm_dp_mst_port *fec_port;
	struct drm_dp_desc desc;
	u8 endpoint_fec;
	u8 endpoint_dsc;

	if (!port)
		return NULL;

	if (port->parent->port_parent)
		immediate_upstream_port = port->parent->port_parent;
	else
		immediate_upstream_port = NULL;

	fec_port = immediate_upstream_port;
	while (fec_port) {
		/*
		 * Each physical link (i.e. not a virtual port) between the
		 * output and the primary device must support FEC
		 */
		if (!drm_dp_mst_is_virtual_dpcd(fec_port) &&
		    !fec_port->fec_capable)
			return NULL;

		fec_port = fec_port->parent->port_parent;
	}

	/* DP-to-DP peer device */
	if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
		u8 upstream_dsc;

		if (drm_dp_dpcd_read(&port->aux,
				     DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
			return NULL;
		if (drm_dp_dpcd_read(&port->aux,
				     DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
			return NULL;
		if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
				     DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
			return NULL;

		/* Endpoint decompression with DP-to-DP peer device */
		if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
		    (endpoint_fec & DP_FEC_CAPABLE) &&
		    (upstream_dsc & DP_DSC_PASSTHROUGH_IS_SUPPORTED))
			return &port->aux;

		/* Virtual DPCD decompression with DP-to-DP peer device */
		return &immediate_upstream_port->aux;
	}

	/* Virtual DPCD decompression with DP-to-HDMI or Virtual DP Sink */
	if (drm_dp_mst_is_virtual_dpcd(port))
		return &port->aux;

	/*
	 * Synaptics quirk
	 * Applies to ports for which:
	 * - Physical aux has Synaptics OUI
	 * - DPv1.4 or higher
	 * - Port is on primary branch device
	 * - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG)
	 */
	if (drm_dp_read_desc(port->mgr->aux, &desc, true))
		return NULL;

	if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
	    port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
	    port->parent == port->mgr->mst_primary) {
		u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];

		if (drm_dp_read_dpcd_caps(port->mgr->aux, dpcd_ext) < 0)
			return NULL;

		if ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT) &&
		    ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK)
		     != DP_DWN_STRM_PORT_TYPE_ANALOG))
			return port->mgr->aux;
	}

	/*
	 * The check below verifies if the MST sink
	 * connected to the GPU is capable of DSC -
	 * therefore the endpoint needs to be
	 * both DSC and FEC capable.
	 */
	if (drm_dp_dpcd_read(&port->aux,
	   DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
		return NULL;
	if (drm_dp_dpcd_read(&port->aux,
	   DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
		return NULL;
	if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
	   (endpoint_fec & DP_FEC_CAPABLE))
		return &port->aux;

	return NULL;
}
EXPORT_SYMBOL(drm_dp_mst_dsc_aux_for_port);
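/*
 * Usage sketch (illustrative): a driver caches the returned aux and uses it
 * both to probe DSC capability and to toggle decompression. DP_DSC_ENABLE
 * and DP_DECOMPRESSION_EN are standard DPCD definitions; "my_dp_enable_dsc"
 * is hypothetical.
 */
static int my_dp_enable_dsc(struct drm_dp_mst_port *port, bool enable)
{
	struct drm_dp_aux *aux = drm_dp_mst_dsc_aux_for_port(port);

	if (!aux)
		return -ENODEV;

	return drm_dp_dpcd_writeb(aux, DP_DSC_ENABLE,
				  enable ? DP_DECOMPRESSION_EN : 0) == 1 ? 0 : -EIO;
}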