/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/string.h>
#include <linux/acpi.h>
#include <linux/version.h>
#include <linux/i2c.h>

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_edid.h>

#include "dm_services.h"
#include "amdgpu.h"
#include "dc.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"

#include "dm_helpers.h"

/* dm_helpers_parse_edid_caps
 *
 * Parse edid caps
 *
 * @edid:	[in] pointer to edid
 * @edid_caps:	[out] pointer to edid caps to be filled in
 * @return
 *	dc_edid_status
 */
enum dc_edid_status dm_helpers_parse_edid_caps(
                struct dc_context *ctx,
                const struct dc_edid *edid,
                struct dc_edid_caps *edid_caps)
{
        struct edid *edid_buf = (struct edid *) edid->raw_edid;
        struct cea_sad *sads;
        int sad_count = -1;
        int sadb_count = -1;
        int i = 0;
        int j = 0;
        uint8_t *sadb = NULL;

        enum dc_edid_status result = EDID_OK;

        if (!edid_caps || !edid)
                return EDID_BAD_INPUT;

        if (!drm_edid_is_valid(edid_buf))
                result = EDID_BAD_CHECKSUM;

        edid_caps->manufacturer_id = (uint16_t) edid_buf->mfg_id[0] |
                        ((uint16_t) edid_buf->mfg_id[1])<<8;
        edid_caps->product_id = (uint16_t) edid_buf->prod_code[0] |
                        ((uint16_t) edid_buf->prod_code[1])<<8;
        edid_caps->serial_number = edid_buf->serial;
        edid_caps->manufacture_week = edid_buf->mfg_week;
        edid_caps->manufacture_year = edid_buf->mfg_year;

        /* One of the four detailed_timings stores the monitor name. It's
         * stored in an array of length 13. */
        for (i = 0; i < 4; i++) {
                if (edid_buf->detailed_timings[i].data.other_data.type == 0xfc) {
                        while (j < 13 && edid_buf->detailed_timings[i].data.other_data.data.str.str[j]) {
                                if (edid_buf->detailed_timings[i].data.other_data.data.str.str[j] == '\n')
                                        break;

                                edid_caps->display_name[j] =
                                        edid_buf->detailed_timings[i].data.other_data.data.str.str[j];
                                j++;
                        }
                }
        }

        edid_caps->edid_hdmi = drm_detect_hdmi_monitor(
                        (struct edid *) edid->raw_edid);

        sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
        if (sad_count <= 0) {
                DRM_INFO("SADs count is: %d, don't need to read it\n",
                                sad_count);
                return result;
        }

        edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ?
                        sad_count : DC_MAX_AUDIO_DESC_COUNT;
        for (i = 0; i < edid_caps->audio_mode_count; ++i) {
                struct cea_sad *sad = &sads[i];

                edid_caps->audio_modes[i].format_code = sad->format;
                edid_caps->audio_modes[i].channel_count = sad->channels + 1;
                edid_caps->audio_modes[i].sample_rate = sad->freq;
                edid_caps->audio_modes[i].sample_size = sad->byte2;
        }

        sadb_count = drm_edid_to_speaker_allocation((struct edid *) edid->raw_edid, &sadb);

        if (sadb_count < 0) {
                DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count);
                sadb_count = 0;
        }

        if (sadb_count)
                edid_caps->speaker_flags = sadb[0];
        else
                edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;

        kfree(sads);
        kfree(sadb);

        return result;
}

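/*
 * Copy the active DRM MST payload allocations (local and remote) into the dc
 * stream allocation table, so dc programs the same slot layout that the DRM
 * MST manager negotiated.
 */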
static void get_payload_table(
                struct amdgpu_dm_connector *aconnector,
                struct dp_mst_stream_allocation_table *proposed_table)
{
        int i;
        struct drm_dp_mst_topology_mgr *mst_mgr =
                        &aconnector->mst_port->mst_mgr;

        mutex_lock(&mst_mgr->payload_lock);

        proposed_table->stream_count = 0;

        /* number of active streams */
        for (i = 0; i < mst_mgr->max_payloads; i++) {
                if (mst_mgr->payloads[i].num_slots == 0)
                        break; /* end of vcp_id table */

                ASSERT(mst_mgr->payloads[i].payload_state !=
                                DP_PAYLOAD_DELETE_LOCAL);

                if (mst_mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL ||
                        mst_mgr->payloads[i].payload_state ==
                                        DP_PAYLOAD_REMOTE) {

                        struct dp_mst_stream_allocation *sa =
                                        &proposed_table->stream_allocations[
                                                proposed_table->stream_count];

                        sa->slot_count = mst_mgr->payloads[i].num_slots;
                        sa->vcp_id = mst_mgr->proposed_vcpis[i]->vcpi;
                        proposed_table->stream_count++;
                }
        }

        mutex_unlock(&mst_mgr->payload_lock);
}

void dm_helpers_dp_update_branch_info(
                struct dc_context *ctx,
                const struct dc_link *link)
{}

/*
 * Writes payload allocation table in immediate downstream device.
 */
bool dm_helpers_dp_mst_write_payload_allocation_table(
                struct dc_context *ctx,
                const struct dc_stream_state *stream,
                struct dp_mst_stream_allocation_table *proposed_table,
                bool enable)
{
        struct amdgpu_dm_connector *aconnector;
        struct drm_dp_mst_topology_mgr *mst_mgr;
        struct drm_dp_mst_port *mst_port;
        int slots = 0;
        bool ret;
        int clock;
        int bpp = 0;
        int pbn = 0;

        aconnector = stream->sink->priv;

        if (!aconnector || !aconnector->mst_port)
                return false;

        mst_mgr = &aconnector->mst_port->mst_mgr;

        if (!mst_mgr->mst_state)
                return false;

        mst_port = aconnector->port;

        if (enable) {
                clock = stream->timing.pix_clk_khz;

                switch (stream->timing.display_color_depth) {

                case COLOR_DEPTH_666:
                        bpp = 6;
                        break;
                case COLOR_DEPTH_888:
                        bpp = 8;
                        break;
                case COLOR_DEPTH_101010:
                        bpp = 10;
                        break;
                case COLOR_DEPTH_121212:
                        bpp = 12;
                        break;
                case COLOR_DEPTH_141414:
                        bpp = 14;
                        break;
                case COLOR_DEPTH_161616:
                        bpp = 16;
                        break;
                default:
                        ASSERT(bpp != 0);
                        break;
                }

                bpp = bpp * 3;

                /* TODO need to know link rate */

                pbn = drm_dp_calc_pbn_mode(clock, bpp);

                slots = drm_dp_find_vcpi_slots(mst_mgr, pbn);
                ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port, pbn, slots);

                if (!ret)
                        return false;

        } else {
                drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port);
        }

        ret = drm_dp_update_payload_part1(mst_mgr);

        /* mst_mgr->payloads holds the VC payloads; the MST branch is notified
         * via DPCD or AUX messages. Slots 1-63 are allocated sequentially for
         * each stream, and the AMD ASIC stream slot allocation should follow
         * the same sequence. Copy the DRM MST allocation into dc. */

        get_payload_table(aconnector, proposed_table);

        if (ret)
                return false;

        return true;
}

/*
 * Clear payload allocation table before enabling the MST DP link.
 */
void dm_helpers_dp_mst_clear_payload_allocation_table(
                struct dc_context *ctx,
                const struct dc_link *link)
{}

/*
 * Polls for ACT (allocation change trigger) handled and sends
 * ALLOCATE_PAYLOAD message.
 */
bool dm_helpers_dp_mst_poll_for_allocation_change_trigger(
                struct dc_context *ctx,
                const struct dc_stream_state *stream)
{
        struct amdgpu_dm_connector *aconnector;
        struct drm_dp_mst_topology_mgr *mst_mgr;
        int ret;

        aconnector = stream->sink->priv;

        if (!aconnector || !aconnector->mst_port)
                return false;

        mst_mgr = &aconnector->mst_port->mst_mgr;

        if (!mst_mgr->mst_state)
                return false;

        ret = drm_dp_check_act_status(mst_mgr);

        if (ret)
                return false;

        return true;
}

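/*
 * Sends the payload update part 2 (ALLOCATE_PAYLOAD to downstream devices)
 * and, when disabling the stream, releases its VCPI allocation.
 */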
bool dm_helpers_dp_mst_send_payload_allocation(
                struct dc_context *ctx,
                const struct dc_stream_state *stream,
                bool enable)
{
        struct amdgpu_dm_connector *aconnector;
        struct drm_dp_mst_topology_mgr *mst_mgr;
        struct drm_dp_mst_port *mst_port;
        int ret;

        aconnector = stream->sink->priv;

        if (!aconnector || !aconnector->mst_port)
                return false;

        mst_port = aconnector->port;

        mst_mgr = &aconnector->mst_port->mst_mgr;

        if (!mst_mgr->mst_state)
                return false;

        ret = drm_dp_update_payload_part2(mst_mgr);

        if (ret)
                return false;

        if (!enable)
                drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port);

        return true;
}

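/* DTN debug log helpers; currently no-ops in this DM implementation. */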
void dm_dtn_log_begin(struct dc_context *ctx)
{}

void dm_dtn_log_append_v(struct dc_context *ctx,
                const char *pMsg, ...)
{}

void dm_dtn_log_end(struct dc_context *ctx)
{}

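/*
 * Enables MST on the connector's topology manager. When called during boot,
 * the start is deferred (only logged) and the function returns true.
 */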
bool dm_helpers_dp_mst_start_top_mgr(
                struct dc_context *ctx,
                const struct dc_link *link,
                bool boot)
{
        struct amdgpu_dm_connector *aconnector = link->priv;

        if (!aconnector) {
                DRM_ERROR("Failed to find connector for link!");
                return false;
        }

        if (boot) {
                DRM_INFO("DM_MST: Deferring MST start on aconnector: %p [id: %d]\n",
                                aconnector, aconnector->base.base.id);
                return true;
        }

        DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
                        aconnector, aconnector->base.base.id);

        return (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) == 0);
}

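/*
 * Disables MST on the connector's topology manager if it is currently
 * enabled.
 */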
void dm_helpers_dp_mst_stop_top_mgr(
                struct dc_context *ctx,
                const struct dc_link *link)
{
        struct amdgpu_dm_connector *aconnector = link->priv;

        if (!aconnector) {
                DRM_ERROR("Failed to find connector for link!");
                return;
        }

        DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n",
                        aconnector, aconnector->base.base.id);

        if (aconnector->mst_mgr.mst_state == true)
                drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false);
}

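/*
 * Reads DPCD registers over the connector's AUX channel. Returns true if the
 * AUX transaction succeeded.
 */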
bool dm_helpers_dp_read_dpcd(
                struct dc_context *ctx,
                const struct dc_link *link,
                uint32_t address,
                uint8_t *data,
                uint32_t size)
{
        struct amdgpu_dm_connector *aconnector = link->priv;

        if (!aconnector) {
                DRM_ERROR("Failed to find connector for link!");
                return false;
        }

        return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address,
                        data, size) > 0;
}

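/*
 * Writes DPCD registers over the connector's AUX channel. Returns true if
 * the AUX transaction succeeded.
 */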
bool dm_helpers_dp_write_dpcd(
                struct dc_context *ctx,
                const struct dc_link *link,
                uint32_t address,
                const uint8_t *data,
                uint32_t size)
{
        struct amdgpu_dm_connector *aconnector = link->priv;

        if (!aconnector) {
                DRM_ERROR("Failed to find connector for link!");
                return false;
        }

        return drm_dp_dpcd_write(&aconnector->dm_dp_aux.aux,
                        address, (uint8_t *)data, size) > 0;
}

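/*
 * Translates a dc i2c_command into an array of struct i2c_msg and submits it
 * on the connector's I2C adapter. Returns true only if every payload was
 * transferred.
 */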
bool dm_helpers_submit_i2c(
                struct dc_context *ctx,
                const struct dc_link *link,
                struct i2c_command *cmd)
{
        struct amdgpu_dm_connector *aconnector = link->priv;
        struct i2c_msg *msgs;
        int i = 0;
        int num = cmd->number_of_payloads;
        bool result;

        if (!aconnector) {
                DRM_ERROR("Failed to find connector for link!");
                return false;
        }

        msgs = kcalloc(num, sizeof(struct i2c_msg), GFP_KERNEL);

        if (!msgs)
                return false;

        for (i = 0; i < num; i++) {
                msgs[i].flags = cmd->payloads[i].write ? 0 : I2C_M_RD;
                msgs[i].addr = cmd->payloads[i].address;
                msgs[i].len = cmd->payloads[i].length;
                msgs[i].buf = cmd->payloads[i].data;
        }

        result = i2c_transfer(&aconnector->i2c->base, msgs, num) == num;

        kfree(msgs);

        return result;
}

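/*
 * Checks for a DP sink while holding the AUX hardware mutex so the probe
 * does not race with other AUX transactions.
 */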
bool dm_helpers_is_dp_sink_present(struct dc_link *link)
{
        bool dp_sink_present;
        struct amdgpu_dm_connector *aconnector = link->priv;

        if (!aconnector) {
                BUG_ON("Failed to find connector for link!");
                return true;
        }

        mutex_lock(&aconnector->dm_dp_aux.aux.hw_mutex);
        dp_sink_present = dc_link_is_dp_sink_present(link);
        mutex_unlock(&aconnector->dm_dp_aux.aux.hw_mutex);
        return dp_sink_present;
}

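/*
 * Reads the sink's EDID over AUX or I2C, retrying on checksum failures, and
 * answers a pending DP EDID_READ automated test request with the checksum.
 */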
enum dc_edid_status dm_helpers_read_local_edid(
                struct dc_context *ctx,
                struct dc_link *link,
                struct dc_sink *sink)
{
        struct amdgpu_dm_connector *aconnector = link->priv;
        struct i2c_adapter *ddc;
        int retry = 3;
        enum dc_edid_status edid_status;
        struct edid *edid;

        if (link->aux_mode)
                ddc = &aconnector->dm_dp_aux.aux.ddc;
        else
                ddc = &aconnector->i2c->base;

        /* Some dongles read the EDID incorrectly the first time; verify the
         * checksum and retry to make sure we read a correct EDID.
         */
        do {

                edid = drm_get_edid(&aconnector->base, ddc);

                if (!edid)
                        return EDID_NO_RESPONSE;

                sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1);
                memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length);

                /* We don't need the original edid anymore */
                kfree(edid);

                edid_status = dm_helpers_parse_edid_caps(
                                                ctx,
                                                &sink->dc_edid,
                                                &sink->edid_caps);

        } while (edid_status == EDID_BAD_CHECKSUM && --retry > 0);

        if (edid_status != EDID_OK)
                DRM_ERROR("EDID err: %d, on connector: %s\n",
                                edid_status,
                                aconnector->base.name);

        if (link->aux_mode) {
                union test_request test_request = { {0} };
                union test_response test_response = { {0} };

                dm_helpers_dp_read_dpcd(ctx,
                                link,
                                DP_TEST_REQUEST,
                                &test_request.raw,
                                sizeof(union test_request));

                if (!test_request.bits.EDID_READ)
                        return edid_status;

                test_response.bits.EDID_CHECKSUM_WRITE = 1;

                dm_helpers_dp_write_dpcd(ctx,
                                link,
                                DP_TEST_EDID_CHECKSUM,
                                &sink->dc_edid.raw_edid[sink->dc_edid.length-1],
                                1);

                dm_helpers_dp_write_dpcd(ctx,
                                link,
                                DP_TEST_RESPONSE,
                                &test_response.raw,
                                sizeof(test_response));
        }

        return edid_status;
}

void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks)
{
        /* TODO: something */
}