1 /* SPDX-License-Identifier: MIT */
2 /*
3 * Copyright (C) 2017 Google, Inc.
 * Copyright © 2017-2019, Intel Corporation.
5 *
6 * Authors:
7 * Sean Paul <seanpaul@chromium.org>
8 * Ramalingam C <ramalingam.c@intel.com>
9 */
10
11 #include <linux/component.h>
12 #include <linux/i2c.h>
13 #include <linux/random.h>
14
15 #include <drm/drm_hdcp.h>
16 #include <drm/i915_component.h>
17
18 #include "i915_reg.h"
19 #include "intel_display_power.h"
20 #include "intel_display_types.h"
21 #include "intel_hdcp.h"
22 #include "intel_sideband.h"
23 #include "intel_connector.h"
24
25 #define KEY_LOAD_TRIES 5
26 #define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS 50
27 #define HDCP2_LC_RETRY_CNT 3
28
29 static
intel_hdcp_is_ksv_valid(u8 * ksv)30 bool intel_hdcp_is_ksv_valid(u8 *ksv)
31 {
32 int i, ones = 0;
33 /* KSV has 20 1's and 20 0's */
34 for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
35 ones += hweight8(ksv[i]);
36 if (ones != 20)
37 return false;
38
39 return true;
40 }
41
42 static
intel_hdcp_read_valid_bksv(struct intel_digital_port * dig_port,const struct intel_hdcp_shim * shim,u8 * bksv)43 int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
44 const struct intel_hdcp_shim *shim, u8 *bksv)
45 {
46 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
47 int ret, i, tries = 2;
48
49 /* HDCP spec states that we must retry the bksv if it is invalid */
50 for (i = 0; i < tries; i++) {
51 ret = shim->read_bksv(dig_port, bksv);
52 if (ret)
53 return ret;
54 if (intel_hdcp_is_ksv_valid(bksv))
55 break;
56 }
57 if (i == tries) {
58 drm_dbg_kms(&i915->drm, "Bksv is invalid\n");
59 return -ENODEV;
60 }
61
62 return 0;
63 }
64
65 /* Is HDCP1.4 capable on Platform and Sink */
intel_hdcp_capable(struct intel_connector * connector)66 bool intel_hdcp_capable(struct intel_connector *connector)
67 {
68 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
69 const struct intel_hdcp_shim *shim = connector->hdcp.shim;
70 bool capable = false;
71 u8 bksv[5];
72
73 if (!shim)
74 return capable;
75
76 if (shim->hdcp_capable) {
77 shim->hdcp_capable(dig_port, &capable);
78 } else {
79 if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv))
80 capable = true;
81 }
82
83 return capable;
84 }
85
86 /* Is HDCP2.2 capable on Platform and Sink */
intel_hdcp2_capable(struct intel_connector * connector)87 bool intel_hdcp2_capable(struct intel_connector *connector)
88 {
89 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
90 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
91 struct intel_hdcp *hdcp = &connector->hdcp;
92 bool capable = false;
93
94 /* I915 support for HDCP2.2 */
95 if (!hdcp->hdcp2_supported)
96 return false;
97
98 /* MEI interface is solid */
99 mutex_lock(&dev_priv->hdcp_comp_mutex);
100 if (!dev_priv->hdcp_comp_added || !dev_priv->hdcp_master) {
101 mutex_unlock(&dev_priv->hdcp_comp_mutex);
102 return false;
103 }
104 mutex_unlock(&dev_priv->hdcp_comp_mutex);
105
106 /* Sink's capability for HDCP2.2 */
107 hdcp->shim->hdcp_2_2_capable(dig_port, &capable);
108
109 return capable;
110 }
111
intel_hdcp_in_use(struct drm_i915_private * dev_priv,enum transcoder cpu_transcoder,enum port port)112 static bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
113 enum transcoder cpu_transcoder, enum port port)
114 {
115 return intel_de_read(dev_priv,
116 HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
117 HDCP_STATUS_ENC;
118 }
119
intel_hdcp2_in_use(struct drm_i915_private * dev_priv,enum transcoder cpu_transcoder,enum port port)120 static bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
121 enum transcoder cpu_transcoder, enum port port)
122 {
123 return intel_de_read(dev_priv,
124 HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
125 LINK_ENCRYPTION_STATUS;
126 }
127
/*
 * Wait for the repeater's KSV FIFO to become ready.
 *
 * Returns 0 when the sink reports the KSV list ready, a negative errno on
 * wait/read failure, or -ETIMEDOUT if readiness was never reported.
 */
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
				    const struct intel_hdcp_shim *shim)
{
	int ret, read_ret;
	bool ksv_ready;

	/*
	 * Poll for ksv list ready (spec says max time allowed is 5s).
	 * __wait_for args: 5s total timeout (us), 1us..100ms backoff between
	 * polls; it stops early on a read error or on readiness.
	 */
	ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port,
							 &ksv_ready),
			 read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
			 100 * 1000);
	if (ret)
		return ret;
	/* Propagate a sink read failure in preference to a timeout. */
	if (read_ret)
		return read_ret;
	if (!ksv_ready)
		return -ETIMEDOUT;

	return 0;
}
148
hdcp_key_loadable(struct drm_i915_private * dev_priv)149 static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
150 {
151 enum i915_power_well_id id;
152 intel_wakeref_t wakeref;
153 bool enabled = false;
154
155 /*
156 * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
157 * On all BXT+, SW can load the keys only when the PW#1 is turned on.
158 */
159 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
160 id = HSW_DISP_PW_GLOBAL;
161 else
162 id = SKL_DISP_PW_1;
163
164 /* PG1 (power well #1) needs to be enabled */
165 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
166 enabled = intel_display_power_well_is_enabled(dev_priv, id);
167
168 /*
169 * Another req for hdcp key loadability is enabled state of pll for
170 * cdclk. Without active crtc we wont land here. So we are assuming that
171 * cdclk is already on.
172 */
173
174 return enabled;
175 }
176
intel_hdcp_clear_keys(struct drm_i915_private * dev_priv)177 static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
178 {
179 intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
180 intel_de_write(dev_priv, HDCP_KEY_STATUS,
181 HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
182 }
183
/*
 * Load the HDCP1.4 keys into the display hardware.
 *
 * Returns 0 on success (including when the keys are already loaded),
 * -ENXIO when the hardware reports a failed/absent key load, or a negative
 * errno from the pcode/register wait.
 */
static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 val;

	/* Nothing to do if the keys were already loaded successfully. */
	val = intel_de_read(dev_priv, HDCP_KEY_STATUS);
	if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
		return 0;

	/*
	 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
	 * out of reset. So if Key is not already loaded, its an error state.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		if (!(intel_de_read(dev_priv, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
			return -ENXIO;

	/*
	 * Initiate loading the HDCP key from fuses.
	 *
	 * BXT+ platforms, HDCP key needs to be loaded by SW. Only Gen 9
	 * platforms except BXT and GLK, differ in the key load trigger process
	 * from other platforms. So GEN9_BC uses the GT Driver Mailbox i/f.
	 */
	if (IS_GEN9_BC(dev_priv)) {
		ret = sandybridge_pcode_write(dev_priv,
					      SKL_PCODE_LOAD_HDCP_KEYS, 1);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to initiate HDCP key load (%d)\n",
				ret);
			return ret;
		}
	} else {
		intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
	}

	/* Wait for the keys to load (500us) */
	ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS,
					HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
					10, 1, &val);
	if (ret)
		return ret;
	/* LOAD_DONE without LOAD_STATUS means the load completed but failed. */
	else if (!(val & HDCP_KEY_LOAD_STATUS))
		return -ENXIO;

	/* Send Aksv over to PCH display for use in authentication */
	intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);

	return 0;
}
235
236 /* Returns updated SHA-1 index */
intel_write_sha_text(struct drm_i915_private * dev_priv,u32 sha_text)237 static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
238 {
239 intel_de_write(dev_priv, HDCP_SHA_TEXT, sha_text);
240 if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
241 drm_err(&dev_priv->drm, "Timed out waiting for SHA1 ready\n");
242 return -ETIMEDOUT;
243 }
244 return 0;
245 }
246
/*
 * Return the HDCP_REP_CTL repeater-present/SHA1-M0 bits for the given
 * transcoder (gen12+) or DDI port (pre-gen12).
 *
 * NOTE(review): on an unknown transcoder/port this returns -EINVAL through
 * a u32, yielding an all-ones-ish bit pattern rather than an error code;
 * callers use the value as a register mask, so this only ever happens after
 * the error has already been logged.
 */
static
u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv,
				enum transcoder cpu_transcoder, enum port port)
{
	/* Gen12+ selects the repeater bits by transcoder, not by port. */
	if (INTEL_GEN(dev_priv) >= 12) {
		switch (cpu_transcoder) {
		case TRANSCODER_A:
			return HDCP_TRANSA_REP_PRESENT |
			       HDCP_TRANSA_SHA1_M0;
		case TRANSCODER_B:
			return HDCP_TRANSB_REP_PRESENT |
			       HDCP_TRANSB_SHA1_M0;
		case TRANSCODER_C:
			return HDCP_TRANSC_REP_PRESENT |
			       HDCP_TRANSC_SHA1_M0;
		case TRANSCODER_D:
			return HDCP_TRANSD_REP_PRESENT |
			       HDCP_TRANSD_SHA1_M0;
		default:
			drm_err(&dev_priv->drm, "Unknown transcoder %d\n",
				cpu_transcoder);
			return -EINVAL;
		}
	}

	switch (port) {
	case PORT_A:
		return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
	case PORT_B:
		return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
	case PORT_C:
		return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
	case PORT_D:
		return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
	case PORT_E:
		return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
	default:
		drm_err(&dev_priv->drm, "Unknown port %d\n", port);
		return -EINVAL;
	}
}
288
/*
 * Validate the repeater's V' against a hardware-computed SHA-1.
 *
 * Writes the sink's V' parts into HDCP_SHA_V_PRIME, then streams the
 * KSV list || BSTATUS || M0 byte stream into the SHA-1 engine and asks
 * the hardware to compare. Returns 0 when V matches, -ETIMEDOUT on engine
 * timeouts, -ENXIO on mismatch, or a negative errno from the shim reads.
 * The statement order here mirrors the hardware programming sequence and
 * must not be rearranged.
 */
static
int intel_hdcp_validate_v_prime(struct intel_connector *connector,
				const struct intel_hdcp_shim *shim,
				u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	u32 vprime, sha_text, sha_leftovers, rep_ctl;
	int ret, i, j, sha_idx;

	/* Process V' values from the receiver */
	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
		ret = shim->read_v_prime_part(dig_port, i, &vprime);
		if (ret)
			return ret;
		intel_de_write(dev_priv, HDCP_SHA_V_PRIME(i), vprime);
	}

	/*
	 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
	 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
	 * stream is written via the HDCP_SHA_TEXT register in 32-bit
	 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
	 * index will keep track of our progress through the 64 bytes as well as
	 * helping us work the 40-bit KSVs through our 32-bit register.
	 *
	 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
	 */
	sha_idx = 0;
	sha_text = 0;
	sha_leftovers = 0;
	rep_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port);
	intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	for (i = 0; i < num_downstream; i++) {
		unsigned int sha_empty;
		u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];

		/* Fill up the empty slots in sha_text and write it out */
		sha_empty = sizeof(sha_text) - sha_leftovers;
		for (j = 0; j < sha_empty; j++) {
			u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8);
			sha_text |= ksv[j] << off;
		}

		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;

		/* Programming guide writes this every 64 bytes */
		sha_idx += sizeof(sha_text);
		if (!(sha_idx % 64))
			intel_de_write(dev_priv, HDCP_REP_CTL,
				       rep_ctl | HDCP_SHA1_TEXT_32);

		/* Store the leftover bytes from the ksv in sha_text */
		sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
		sha_text = 0;
		for (j = 0; j < sha_leftovers; j++)
			sha_text |= ksv[sha_empty + j] <<
					((sizeof(sha_text) - j - 1) * 8);

		/*
		 * If we still have room in sha_text for more data, continue.
		 * Otherwise, write it out immediately.
		 */
		if (sizeof(sha_text) > sha_leftovers)
			continue;

		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_leftovers = 0;
		sha_text = 0;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
	 * bytes are leftover from the last ksv, we might be able to fit them
	 * all in sha_text (first 2 cases), or we might need to split them up
	 * into 2 writes (last 2 cases).
	 */
	if (sha_leftovers == 0) {
		/* Write 16 bits of text, 16 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(dev_priv,
					   bstatus[0] << 8 | bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 16 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 1) {
		/* Write 24 bits of text, 8 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
		/* Only 24-bits of data, must be in the LSB */
		sha_text = (sha_text & 0xffffff00) >> 8;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 24 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 2) {
		/* Write 32 bits of text */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0] << 8 | bstatus[1];
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 64 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		for (i = 0; i < 2; i++) {
			ret = intel_write_sha_text(dev_priv, 0);
			if (ret < 0)
				return ret;
			sha_idx += sizeof(sha_text);
		}

		/*
		 * Terminate the SHA-1 stream by hand. For the other leftover
		 * cases this is appended by the hardware.
		 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text = DRM_HDCP_SHA1_TERMINATOR << 24;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else if (sha_leftovers == 3) {
		/* Write 32 bits of text (filled from LSB) */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0];
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of text (filled from LSB), 24 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(dev_priv, bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else {
		drm_dbg_kms(&dev_priv->drm, "Invalid number of leftovers %d\n",
			    sha_leftovers);
		return -EINVAL;
	}

	intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
	while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * Last write gets the length of the concatenation in bits. That is:
	 *  - 5 bytes per device
	 *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
	 */
	sha_text = (num_downstream * 5 + 10) * 8;
	ret = intel_write_sha_text(dev_priv, sha_text);
	if (ret < 0)
		return ret;

	/* Tell the HW we're done with the hash and wait for it to ACK */
	intel_de_write(dev_priv, HDCP_REP_CTL,
		       rep_ctl | HDCP_SHA1_COMPLETE_HASH);
	if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL,
				  HDCP_SHA1_COMPLETE, 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for SHA1 complete\n");
		return -ETIMEDOUT;
	}
	/* Hardware compared its SHA-1 against V'; V_MATCH reports the verdict. */
	if (!(intel_de_read(dev_priv, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
		drm_dbg_kms(&dev_priv->drm, "SHA-1 mismatch, HDCP failed\n");
		return -ENXIO;
	}

	return 0;
}
531
/*
 * Implements Part 2 of the HDCP authorization procedure: repeater
 * authentication. Waits for the KSV FIFO, validates the downstream
 * topology, checks the KSV list against the revocation list, and
 * verifies V'. Returns 0 on success or a negative errno.
 */
static
int intel_hdcp_auth_downstream(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
	u8 bstatus[2], num_downstream, *ksv_fifo;
	int ret, i, tries = 3;

	ret = intel_hdcp_poll_ksv_fifo(dig_port, shim);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "KSV list failed to become ready (%d)\n", ret);
		return ret;
	}

	ret = shim->read_bstatus(dig_port, bstatus);
	if (ret)
		return ret;

	/* Reject topologies that exceed the HDCP1.4 device/cascade limits. */
	if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
	    DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
		drm_dbg_kms(&dev_priv->drm, "Max Topology Limit Exceeded\n");
		return -EPERM;
	}

	/*
	 * When repeater reports 0 device count, HDCP1.4 spec allows disabling
	 * the HDCP encryption. That implies that repeater can't have its own
	 * display. As there is no consumption of encrypted content in the
	 * repeater with 0 downstream devices, we are failing the
	 * authentication.
	 */
	num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
	if (num_downstream == 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Repeater with zero downstream devices\n");
		return -EINVAL;
	}

	ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
	if (!ksv_fifo) {
		drm_dbg_kms(&dev_priv->drm, "Out of mem: ksv_fifo\n");
		return -ENOMEM;
	}

	ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo);
	if (ret)
		goto err;

	/* Any revoked KSV anywhere in the downstream list fails auth. */
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, ksv_fifo,
					num_downstream) > 0) {
		drm_err(&dev_priv->drm, "Revoked Ksv(s) in ksv_fifo\n");
		ret = -EPERM;
		goto err;
	}

	/*
	 * When V prime mismatches, DP Spec mandates re-read of
	 * V prime atleast twice.
	 */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_validate_v_prime(connector, shim,
						  ksv_fifo, num_downstream,
						  bstatus);
		if (!ret)
			break;
	}

	/* ret still holds the last validation error here. */
	if (i == tries) {
		drm_dbg_kms(&dev_priv->drm,
			    "V Prime validation failed.(%d)\n", ret);
		goto err;
	}

	drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (%d downstream devices)\n",
		    num_downstream);
	ret = 0;
err:
	kfree(ksv_fifo);
	return ret;
}
615
/*
 * Implements Part 1 of the HDCP authorization procedure: An/Aksv exchange,
 * Bksv validation, R0/R0' comparison and encryption enable. On a repeater,
 * chains into Part 2 (intel_hdcp_auth_downstream). The register write
 * ordering follows the hardware protocol and must not be rearranged.
 * Returns 0 on success or a negative errno.
 */
static int intel_hdcp_auth(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	unsigned long r0_prime_gen_start;
	int ret, i, tries = 2;
	/* Unions give byte-wise shim access to the 32-bit register views. */
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_AN_LEN];
	} an;
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_KSV_LEN];
	} bksv;
	union {
		u32 reg;
		u8 shim[DRM_HDCP_RI_LEN];
	} ri;
	bool repeater_present, hdcp_capable;

	/*
	 * Detects whether the display is HDCP capable. Although we check for
	 * valid Bksv below, the HDCP over DP spec requires that we check
	 * whether the display supports HDCP before we write An. For HDMI
	 * displays, this is not necessary.
	 */
	if (shim->hdcp_capable) {
		ret = shim->hdcp_capable(dig_port, &hdcp_capable);
		if (ret)
			return ret;
		if (!hdcp_capable) {
			drm_dbg_kms(&dev_priv->drm,
				    "Panel is not HDCP capable\n");
			return -EINVAL;
		}
	}

	/* Initialize An with 2 random values and acquire it */
	for (i = 0; i < 2; i++)
		intel_de_write(dev_priv,
			       HDCP_ANINIT(dev_priv, cpu_transcoder, port),
			       get_random_u32());
	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
		       HDCP_CONF_CAPTURE_AN);

	/* Wait for An to be acquired */
	if (intel_de_wait_for_set(dev_priv,
				  HDCP_STATUS(dev_priv, cpu_transcoder, port),
				  HDCP_STATUS_AN_READY, 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for An\n");
		return -ETIMEDOUT;
	}

	an.reg[0] = intel_de_read(dev_priv,
				  HDCP_ANLO(dev_priv, cpu_transcoder, port));
	an.reg[1] = intel_de_read(dev_priv,
				  HDCP_ANHI(dev_priv, cpu_transcoder, port));
	ret = shim->write_an_aksv(dig_port, an.shim);
	if (ret)
		return ret;

	/* The sink starts computing R0' once it has received Aksv. */
	r0_prime_gen_start = jiffies;

	memset(&bksv, 0, sizeof(bksv));

	ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim);
	if (ret < 0)
		return ret;

	/* Refuse to authenticate against a revoked receiver KSV. */
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1) > 0) {
		drm_err(&dev_priv->drm, "BKSV is revoked\n");
		return -EPERM;
	}

	intel_de_write(dev_priv, HDCP_BKSVLO(dev_priv, cpu_transcoder, port),
		       bksv.reg[0]);
	intel_de_write(dev_priv, HDCP_BKSVHI(dev_priv, cpu_transcoder, port),
		       bksv.reg[1]);

	ret = shim->repeater_present(dig_port, &repeater_present);
	if (ret)
		return ret;
	if (repeater_present)
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port));

	ret = shim->toggle_signalling(dig_port, cpu_transcoder, true);
	if (ret)
		return ret;

	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
		       HDCP_CONF_AUTH_AND_ENC);

	/* Wait for R0 ready */
	if (wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
		     (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for R0 ready\n");
		return -ETIMEDOUT;
	}

	/*
	 * Wait for R0' to become available. The spec says 100ms from Aksv, but
	 * some monitors can take longer than this. We'll set the timeout at
	 * 300ms just to be sure.
	 *
	 * On DP, there's an R0_READY bit available but no such bit
	 * exists on HDMI. Since the upper-bound is the same, we'll just do
	 * the stupid thing instead of polling on one and not the other.
	 */
	wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);

	tries = 3;

	/*
	 * DP HDCP Spec mandates the two more reattempt to read R0, incase
	 * of R0 mismatch.
	 */
	for (i = 0; i < tries; i++) {
		ri.reg = 0;
		ret = shim->read_ri_prime(dig_port, ri.shim);
		if (ret)
			return ret;
		intel_de_write(dev_priv,
			       HDCP_RPRIME(dev_priv, cpu_transcoder, port),
			       ri.reg);

		/* Wait for Ri prime match */
		if (!wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
			      (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
			break;
	}

	if (i == tries) {
		drm_dbg_kms(&dev_priv->drm,
			    "Timed out waiting for Ri prime match (%x)\n",
			    intel_de_read(dev_priv, HDCP_STATUS(dev_priv,
					  cpu_transcoder, port)));
		return -ETIMEDOUT;
	}

	/* Wait for encryption confirmation */
	if (intel_de_wait_for_set(dev_priv,
				  HDCP_STATUS(dev_priv, cpu_transcoder, port),
				  HDCP_STATUS_ENC,
				  ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(&dev_priv->drm, "Timed out waiting for encryption\n");
		return -ETIMEDOUT;
	}

	/*
	 * XXX: If we have MST-connected devices, we need to enable encryption
	 * on those as well.
	 */

	if (repeater_present)
		return intel_hdcp_auth_downstream(connector);

	drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (no repeater present)\n");
	return 0;
}
781
_intel_hdcp_disable(struct intel_connector * connector)782 static int _intel_hdcp_disable(struct intel_connector *connector)
783 {
784 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
785 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
786 struct intel_hdcp *hdcp = &connector->hdcp;
787 enum port port = dig_port->base.port;
788 enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
789 u32 repeater_ctl;
790 int ret;
791
792 drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n",
793 connector->base.name, connector->base.base.id);
794
795 /*
796 * If there are other connectors on this port using HDCP, don't disable
797 * it. Instead, toggle the HDCP signalling off on that particular
798 * connector/pipe and exit.
799 */
800 if (dig_port->num_hdcp_streams > 0) {
801 ret = hdcp->shim->toggle_signalling(dig_port,
802 cpu_transcoder, false);
803 if (ret)
804 DRM_ERROR("Failed to disable HDCP signalling\n");
805 return ret;
806 }
807
808 hdcp->hdcp_encrypted = false;
809 intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
810 if (intel_de_wait_for_clear(dev_priv,
811 HDCP_STATUS(dev_priv, cpu_transcoder, port),
812 ~0, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
813 drm_err(&dev_priv->drm,
814 "Failed to disable HDCP, timeout clearing status\n");
815 return -ETIMEDOUT;
816 }
817
818 repeater_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder,
819 port);
820 intel_de_write(dev_priv, HDCP_REP_CTL,
821 intel_de_read(dev_priv, HDCP_REP_CTL) & ~repeater_ctl);
822
823 ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false);
824 if (ret) {
825 drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n");
826 return ret;
827 }
828
829 drm_dbg_kms(&dev_priv->drm, "HDCP is disabled\n");
830 return 0;
831 }
832
_intel_hdcp_enable(struct intel_connector * connector)833 static int _intel_hdcp_enable(struct intel_connector *connector)
834 {
835 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
836 struct intel_hdcp *hdcp = &connector->hdcp;
837 int i, ret, tries = 3;
838
839 drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being enabled...\n",
840 connector->base.name, connector->base.base.id);
841
842 if (!hdcp_key_loadable(dev_priv)) {
843 drm_err(&dev_priv->drm, "HDCP key Load is not possible\n");
844 return -ENXIO;
845 }
846
847 for (i = 0; i < KEY_LOAD_TRIES; i++) {
848 ret = intel_hdcp_load_keys(dev_priv);
849 if (!ret)
850 break;
851 intel_hdcp_clear_keys(dev_priv);
852 }
853 if (ret) {
854 drm_err(&dev_priv->drm, "Could not load HDCP keys, (%d)\n",
855 ret);
856 return ret;
857 }
858
859 /* Incase of authentication failures, HDCP spec expects reauth. */
860 for (i = 0; i < tries; i++) {
861 ret = intel_hdcp_auth(connector);
862 if (!ret) {
863 hdcp->hdcp_encrypted = true;
864 return 0;
865 }
866
867 drm_dbg_kms(&dev_priv->drm, "HDCP Auth failure (%d)\n", ret);
868
869 /* Ensuring HDCP encryption and signalling are stopped. */
870 _intel_hdcp_disable(connector);
871 }
872
873 drm_dbg_kms(&dev_priv->drm,
874 "HDCP authentication failed (%d tries/%d)\n", tries, ret);
875 return ret;
876 }
877
/* Map an embedded intel_hdcp back to its owning intel_connector. */
static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
{
	return container_of(hdcp, struct intel_connector, hdcp);
}
882
/*
 * Update the connector's content-protection value, keeping the port's
 * active-HDCP-stream count in sync and optionally scheduling the property
 * worker to push the new value to userspace.
 *
 * Callers must hold hdcp->mutex; on any actual transition dig_port's
 * hdcp_mutex must be held as well (both are asserted below).
 */
static void intel_hdcp_update_value(struct intel_connector *connector,
				    u64 value, bool update_property)
{
	struct drm_device *dev = connector->base.dev;
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;

	drm_WARN_ON(connector->base.dev, !mutex_is_locked(&hdcp->mutex));

	if (hdcp->value == value)
		return;

	drm_WARN_ON(dev, !mutex_is_locked(&dig_port->hdcp_mutex));

	/* Maintain the per-port count of streams currently ENABLED. */
	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		if (!drm_WARN_ON(dev, dig_port->num_hdcp_streams == 0))
			dig_port->num_hdcp_streams--;
	} else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		dig_port->num_hdcp_streams++;
	}

	hdcp->value = value;
	if (update_property) {
		/* Reference dropped by intel_hdcp_prop_work(). */
		drm_connector_get(&connector->base);
		schedule_work(&hdcp->prop_work);
	}
}
910
/*
 * Implements Part 3 of the HDCP authorization procedure: link integrity
 * checking. Verifies encryption is still live and the shim's link check
 * passes; on failure, tears HDCP down and re-authenticates, downgrading
 * the property to DESIRED if that fails. Returns 0 or a negative errno.
 */
static int intel_hdcp_check_link(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);

	cpu_transcoder = hdcp->cpu_transcoder;

	/* Check_link valid only when HDCP1.4 is enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	/* Hardware silently dropping encryption is unexpected; flag loudly. */
	if (drm_WARN_ON(&dev_priv->drm,
			!intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) {
		drm_err(&dev_priv->drm,
			"%s:%d HDCP link stopped encryption,%x\n",
			connector->base.name, connector->base.base.id,
			intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)));
		ret = -ENXIO;
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	/* Link still good: make sure the property reflects ENABLED. */
	if (hdcp->shim->check_link(dig_port, connector)) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_ENABLED, true);
		}
		goto out;
	}

	drm_dbg_kms(&dev_priv->drm,
		    "[%s:%d] HDCP link failed, retrying authentication\n",
		    connector->base.name, connector->base.base.id);

	ret = _intel_hdcp_disable(connector);
	if (ret) {
		drm_err(&dev_priv->drm, "Failed to disable hdcp (%d)\n", ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	ret = _intel_hdcp_enable(connector);
	if (ret) {
		drm_err(&dev_priv->drm, "Failed to enable hdcp (%d)\n", ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
981
/*
 * Deferred worker that pushes hdcp->value out to the connector's
 * content-protection property. Runs with the connection_mutex held since
 * the property update requires it; drops the connector reference taken
 * when the work was scheduled.
 */
static void intel_hdcp_prop_work(struct work_struct *work)
{
	struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
					       prop_work);
	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);

	drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, NULL);
	mutex_lock(&hdcp->mutex);

	/*
	 * This worker is only used to flip between ENABLED/DESIRED. Either of
	 * those to UNDESIRED is handled by core. If value == UNDESIRED,
	 * we're running just after hdcp has been disabled, so just exit
	 */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		drm_hdcp_update_content_protection(&connector->base,
						   hdcp->value);

	mutex_unlock(&hdcp->mutex);
	drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex);

	/* Pairs with the drm_connector_get() done at schedule time. */
	drm_connector_put(&connector->base);
}
1006
is_hdcp_supported(struct drm_i915_private * dev_priv,enum port port)1007 bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
1008 {
1009 return INTEL_INFO(dev_priv)->display.has_hdcp &&
1010 (INTEL_GEN(dev_priv) >= 12 || port < PORT_E);
1011 }
1012
1013 static int
hdcp2_prepare_ake_init(struct intel_connector * connector,struct hdcp2_ake_init * ake_data)1014 hdcp2_prepare_ake_init(struct intel_connector *connector,
1015 struct hdcp2_ake_init *ake_data)
1016 {
1017 struct hdcp_port_data *data = &connector->hdcp.port_data;
1018 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1019 struct i915_hdcp_comp_master *comp;
1020 int ret;
1021
1022 mutex_lock(&dev_priv->hdcp_comp_mutex);
1023 comp = dev_priv->hdcp_master;
1024
1025 if (!comp || !comp->ops) {
1026 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1027 return -EINVAL;
1028 }
1029
1030 ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
1031 if (ret)
1032 drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. %d\n",
1033 ret);
1034 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1035
1036 return ret;
1037 }
1038
1039 static int
hdcp2_verify_rx_cert_prepare_km(struct intel_connector * connector,struct hdcp2_ake_send_cert * rx_cert,bool * paired,struct hdcp2_ake_no_stored_km * ek_pub_km,size_t * msg_sz)1040 hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
1041 struct hdcp2_ake_send_cert *rx_cert,
1042 bool *paired,
1043 struct hdcp2_ake_no_stored_km *ek_pub_km,
1044 size_t *msg_sz)
1045 {
1046 struct hdcp_port_data *data = &connector->hdcp.port_data;
1047 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1048 struct i915_hdcp_comp_master *comp;
1049 int ret;
1050
1051 mutex_lock(&dev_priv->hdcp_comp_mutex);
1052 comp = dev_priv->hdcp_master;
1053
1054 if (!comp || !comp->ops) {
1055 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1056 return -EINVAL;
1057 }
1058
1059 ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data,
1060 rx_cert, paired,
1061 ek_pub_km, msg_sz);
1062 if (ret < 0)
1063 drm_dbg_kms(&dev_priv->drm, "Verify rx_cert failed. %d\n",
1064 ret);
1065 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1066
1067 return ret;
1068 }
1069
hdcp2_verify_hprime(struct intel_connector * connector,struct hdcp2_ake_send_hprime * rx_hprime)1070 static int hdcp2_verify_hprime(struct intel_connector *connector,
1071 struct hdcp2_ake_send_hprime *rx_hprime)
1072 {
1073 struct hdcp_port_data *data = &connector->hdcp.port_data;
1074 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1075 struct i915_hdcp_comp_master *comp;
1076 int ret;
1077
1078 mutex_lock(&dev_priv->hdcp_comp_mutex);
1079 comp = dev_priv->hdcp_master;
1080
1081 if (!comp || !comp->ops) {
1082 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1083 return -EINVAL;
1084 }
1085
1086 ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
1087 if (ret < 0)
1088 drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. %d\n", ret);
1089 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1090
1091 return ret;
1092 }
1093
1094 static int
hdcp2_store_pairing_info(struct intel_connector * connector,struct hdcp2_ake_send_pairing_info * pairing_info)1095 hdcp2_store_pairing_info(struct intel_connector *connector,
1096 struct hdcp2_ake_send_pairing_info *pairing_info)
1097 {
1098 struct hdcp_port_data *data = &connector->hdcp.port_data;
1099 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1100 struct i915_hdcp_comp_master *comp;
1101 int ret;
1102
1103 mutex_lock(&dev_priv->hdcp_comp_mutex);
1104 comp = dev_priv->hdcp_master;
1105
1106 if (!comp || !comp->ops) {
1107 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1108 return -EINVAL;
1109 }
1110
1111 ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
1112 if (ret < 0)
1113 drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n",
1114 ret);
1115 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1116
1117 return ret;
1118 }
1119
1120 static int
hdcp2_prepare_lc_init(struct intel_connector * connector,struct hdcp2_lc_init * lc_init)1121 hdcp2_prepare_lc_init(struct intel_connector *connector,
1122 struct hdcp2_lc_init *lc_init)
1123 {
1124 struct hdcp_port_data *data = &connector->hdcp.port_data;
1125 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1126 struct i915_hdcp_comp_master *comp;
1127 int ret;
1128
1129 mutex_lock(&dev_priv->hdcp_comp_mutex);
1130 comp = dev_priv->hdcp_master;
1131
1132 if (!comp || !comp->ops) {
1133 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1134 return -EINVAL;
1135 }
1136
1137 ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
1138 if (ret < 0)
1139 drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. %d\n",
1140 ret);
1141 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1142
1143 return ret;
1144 }
1145
1146 static int
hdcp2_verify_lprime(struct intel_connector * connector,struct hdcp2_lc_send_lprime * rx_lprime)1147 hdcp2_verify_lprime(struct intel_connector *connector,
1148 struct hdcp2_lc_send_lprime *rx_lprime)
1149 {
1150 struct hdcp_port_data *data = &connector->hdcp.port_data;
1151 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1152 struct i915_hdcp_comp_master *comp;
1153 int ret;
1154
1155 mutex_lock(&dev_priv->hdcp_comp_mutex);
1156 comp = dev_priv->hdcp_master;
1157
1158 if (!comp || !comp->ops) {
1159 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1160 return -EINVAL;
1161 }
1162
1163 ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
1164 if (ret < 0)
1165 drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n",
1166 ret);
1167 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1168
1169 return ret;
1170 }
1171
hdcp2_prepare_skey(struct intel_connector * connector,struct hdcp2_ske_send_eks * ske_data)1172 static int hdcp2_prepare_skey(struct intel_connector *connector,
1173 struct hdcp2_ske_send_eks *ske_data)
1174 {
1175 struct hdcp_port_data *data = &connector->hdcp.port_data;
1176 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1177 struct i915_hdcp_comp_master *comp;
1178 int ret;
1179
1180 mutex_lock(&dev_priv->hdcp_comp_mutex);
1181 comp = dev_priv->hdcp_master;
1182
1183 if (!comp || !comp->ops) {
1184 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1185 return -EINVAL;
1186 }
1187
1188 ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
1189 if (ret < 0)
1190 drm_dbg_kms(&dev_priv->drm, "Get session key failed. %d\n",
1191 ret);
1192 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1193
1194 return ret;
1195 }
1196
1197 static int
hdcp2_verify_rep_topology_prepare_ack(struct intel_connector * connector,struct hdcp2_rep_send_receiverid_list * rep_topology,struct hdcp2_rep_send_ack * rep_send_ack)1198 hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
1199 struct hdcp2_rep_send_receiverid_list
1200 *rep_topology,
1201 struct hdcp2_rep_send_ack *rep_send_ack)
1202 {
1203 struct hdcp_port_data *data = &connector->hdcp.port_data;
1204 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1205 struct i915_hdcp_comp_master *comp;
1206 int ret;
1207
1208 mutex_lock(&dev_priv->hdcp_comp_mutex);
1209 comp = dev_priv->hdcp_master;
1210
1211 if (!comp || !comp->ops) {
1212 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1213 return -EINVAL;
1214 }
1215
1216 ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data,
1217 rep_topology,
1218 rep_send_ack);
1219 if (ret < 0)
1220 drm_dbg_kms(&dev_priv->drm,
1221 "Verify rep topology failed. %d\n", ret);
1222 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1223
1224 return ret;
1225 }
1226
1227 static int
hdcp2_verify_mprime(struct intel_connector * connector,struct hdcp2_rep_stream_ready * stream_ready)1228 hdcp2_verify_mprime(struct intel_connector *connector,
1229 struct hdcp2_rep_stream_ready *stream_ready)
1230 {
1231 struct hdcp_port_data *data = &connector->hdcp.port_data;
1232 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1233 struct i915_hdcp_comp_master *comp;
1234 int ret;
1235
1236 mutex_lock(&dev_priv->hdcp_comp_mutex);
1237 comp = dev_priv->hdcp_master;
1238
1239 if (!comp || !comp->ops) {
1240 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1241 return -EINVAL;
1242 }
1243
1244 ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
1245 if (ret < 0)
1246 drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. %d\n", ret);
1247 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1248
1249 return ret;
1250 }
1251
hdcp2_authenticate_port(struct intel_connector * connector)1252 static int hdcp2_authenticate_port(struct intel_connector *connector)
1253 {
1254 struct hdcp_port_data *data = &connector->hdcp.port_data;
1255 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1256 struct i915_hdcp_comp_master *comp;
1257 int ret;
1258
1259 mutex_lock(&dev_priv->hdcp_comp_mutex);
1260 comp = dev_priv->hdcp_master;
1261
1262 if (!comp || !comp->ops) {
1263 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1264 return -EINVAL;
1265 }
1266
1267 ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
1268 if (ret < 0)
1269 drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. %d\n",
1270 ret);
1271 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1272
1273 return ret;
1274 }
1275
hdcp2_close_mei_session(struct intel_connector * connector)1276 static int hdcp2_close_mei_session(struct intel_connector *connector)
1277 {
1278 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1279 struct i915_hdcp_comp_master *comp;
1280 int ret;
1281
1282 mutex_lock(&dev_priv->hdcp_comp_mutex);
1283 comp = dev_priv->hdcp_master;
1284
1285 if (!comp || !comp->ops) {
1286 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1287 return -EINVAL;
1288 }
1289
1290 ret = comp->ops->close_hdcp_session(comp->mei_dev,
1291 &connector->hdcp.port_data);
1292 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1293
1294 return ret;
1295 }
1296
/*
 * Tear down the HDCP2.2 session in the ME FW. Currently just closes the
 * mei session; named for symmetry with hdcp2_authenticate_port().
 */
static int hdcp2_deauthenticate_port(struct intel_connector *connector)
{
	return hdcp2_close_mei_session(connector);
}
1301
1302 /* Authentication flow starts from here */
/*
 * HDCP2.2 Step 1: Authentication and Key Exchange (AKE).
 *
 * Drives the AKE handshake between the ME FW (which holds the keys) and
 * the sink: AKE_Init -> AKE_Send_Cert -> [No_]Stored_Km ->
 * AKE_Send_H_prime -> (only if not yet paired) AKE_Send_Pairing_Info.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	/* Messages are exchanged strictly one at a time, so share storage */
	union {
		struct hdcp2_ake_init ake_init;
		struct hdcp2_ake_send_cert send_cert;
		struct hdcp2_ake_no_stored_km no_stored_km;
		struct hdcp2_ake_send_hprime send_hprime;
		struct hdcp2_ake_send_pairing_info pairing_info;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	size_t size;
	int ret;

	/* Init for seq_num */
	hdcp->seq_num_v = 0;
	hdcp->seq_num_m = 0;

	ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
	if (ret < 0)
		return ret;

	ret = shim->write_2_2_msg(dig_port, &msgs.ake_init,
				  sizeof(msgs.ake_init));
	if (ret < 0)
		return ret;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_CERT,
				 &msgs.send_cert, sizeof(msgs.send_cert));
	if (ret < 0)
		return ret;

	/* The sink must advertise HDCP2.2 in its RxCaps version byte */
	if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
		drm_dbg_kms(&dev_priv->drm, "cert.rx_caps dont claim HDCP2.2\n");
		return -EINVAL;
	}

	hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);

	/* Reject a receiver whose ID appears on the SRM revocation list */
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
					msgs.send_cert.cert_rx.receiver_id,
					1) > 0) {
		drm_err(&dev_priv->drm, "Receiver ID is revoked\n");
		return -EPERM;
	}

	/*
	 * Here msgs.no_stored_km will hold msgs corresponding to the km
	 * stored also.
	 */
	ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
					      &hdcp->is_paired,
					      &msgs.no_stored_km, &size);
	if (ret < 0)
		return ret;

	/* size differs between stored- and no-stored-Km variants */
	ret = shim->write_2_2_msg(dig_port, &msgs.no_stored_km, size);
	if (ret < 0)
		return ret;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_HPRIME,
				 &msgs.send_hprime, sizeof(msgs.send_hprime));
	if (ret < 0)
		return ret;

	ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
	if (ret < 0)
		return ret;

	if (!hdcp->is_paired) {
		/* Pairing is required */
		ret = shim->read_2_2_msg(dig_port,
					 HDCP_2_2_AKE_SEND_PAIRING_INFO,
					 &msgs.pairing_info,
					 sizeof(msgs.pairing_info));
		if (ret < 0)
			return ret;

		ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
		if (ret < 0)
			return ret;
		hdcp->is_paired = true;
	}

	return 0;
}
1391
/*
 * HDCP2.2 Step 2: Locality Check (LC).
 *
 * LC_Init is prepared by the ME FW, written to the sink, and the sink's
 * L_prime is handed back to the FW for verification. The whole exchange is
 * retried up to HDCP2_LC_RETRY_CNT times on any failure.
 */
static int hdcp2_locality_check(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_lc_init lc_init;
		struct hdcp2_lc_send_lprime send_lprime;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int tries = HDCP2_LC_RETRY_CNT, ret, i;

	for (i = 0; i < tries; i++) {
		ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
		if (ret < 0)
			continue;

		ret = shim->write_2_2_msg(dig_port, &msgs.lc_init,
					  sizeof(msgs.lc_init));
		if (ret < 0)
			continue;

		ret = shim->read_2_2_msg(dig_port,
					 HDCP_2_2_LC_SEND_LPRIME,
					 &msgs.send_lprime,
					 sizeof(msgs.send_lprime));
		if (ret < 0)
			continue;

		ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
		if (!ret)
			break;
	}

	/* ret holds the outcome of the last attempt (0 on success) */
	return ret;
}
1427
hdcp2_session_key_exchange(struct intel_connector * connector)1428 static int hdcp2_session_key_exchange(struct intel_connector *connector)
1429 {
1430 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1431 struct intel_hdcp *hdcp = &connector->hdcp;
1432 struct hdcp2_ske_send_eks send_eks;
1433 int ret;
1434
1435 ret = hdcp2_prepare_skey(connector, &send_eks);
1436 if (ret < 0)
1437 return ret;
1438
1439 ret = hdcp->shim->write_2_2_msg(dig_port, &send_eks,
1440 sizeof(send_eks));
1441 if (ret < 0)
1442 return ret;
1443
1444 return 0;
1445 }
1446
1447 static
hdcp2_propagate_stream_management_info(struct intel_connector * connector)1448 int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
1449 {
1450 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1451 struct drm_i915_private *i915 = to_i915(connector->base.dev);
1452 struct intel_hdcp *hdcp = &connector->hdcp;
1453 union {
1454 struct hdcp2_rep_stream_manage stream_manage;
1455 struct hdcp2_rep_stream_ready stream_ready;
1456 } msgs;
1457 const struct intel_hdcp_shim *shim = hdcp->shim;
1458 int ret;
1459
1460 /* Prepare RepeaterAuth_Stream_Manage msg */
1461 msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
1462 drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);
1463
1464 /* K no of streams is fixed as 1. Stored as big-endian. */
1465 msgs.stream_manage.k = cpu_to_be16(1);
1466
1467 /* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
1468 msgs.stream_manage.streams[0].stream_id = 0;
1469 msgs.stream_manage.streams[0].stream_type = hdcp->content_type;
1470
1471 /* Send it to Repeater */
1472 ret = shim->write_2_2_msg(dig_port, &msgs.stream_manage,
1473 sizeof(msgs.stream_manage));
1474 if (ret < 0)
1475 return ret;
1476
1477 ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_STREAM_READY,
1478 &msgs.stream_ready, sizeof(msgs.stream_ready));
1479 if (ret < 0)
1480 return ret;
1481
1482 hdcp->port_data.seq_num_m = hdcp->seq_num_m;
1483 hdcp->port_data.streams[0].stream_type = hdcp->content_type;
1484
1485 ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);
1486 if (ret < 0)
1487 return ret;
1488
1489 hdcp->seq_num_m++;
1490
1491 if (hdcp->seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
1492 drm_dbg_kms(&i915->drm, "seq_num_m roll over.\n");
1493 return -1;
1494 }
1495
1496 return 0;
1497 }
1498
/*
 * HDCP2.2 repeater step: read RepeaterAuth_Send_ReceiverID_List from the
 * downstream repeater, validate it (topology limits, seq_num_v
 * monotonicity, SRM revocation), have the ME FW verify it and prepare the
 * SendAck, then write the ack back to the repeater.
 */
static
int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_send_receiverid_list recvid_list;
		struct hdcp2_rep_send_ack rep_ack;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	u32 seq_num_v, device_cnt;
	u8 *rx_info;
	int ret;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
				 &msgs.recvid_list, sizeof(msgs.recvid_list));
	if (ret < 0)
		return ret;

	rx_info = msgs.recvid_list.rx_info;

	/* Repeater reports its cascade/device limits were exceeded */
	if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
	    HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
		drm_dbg_kms(&dev_priv->drm, "Topology Max Size Exceeded\n");
		return -EINVAL;
	}

	/* Converting and Storing the seq_num_v to local variable as DWORD */
	seq_num_v =
		drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);

	/* A session that is not yet encrypted must start at seq_num_v == 0 */
	if (!hdcp->hdcp2_encrypted && seq_num_v) {
		drm_dbg_kms(&dev_priv->drm,
			    "Non zero Seq_num_v at first RecvId_List msg\n");
		return -EINVAL;
	}

	if (seq_num_v < hdcp->seq_num_v) {
		/* Roll over of the seq_num_v from repeater. Reauthenticate. */
		drm_dbg_kms(&dev_priv->drm, "Seq_num_v roll over.\n");
		return -EINVAL;
	}

	/* Device count is split across the two rx_info bytes */
	device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
		      HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
					msgs.recvid_list.receiver_ids,
					device_cnt) > 0) {
		drm_err(&dev_priv->drm, "Revoked receiver ID(s) is in list\n");
		return -EPERM;
	}

	ret = hdcp2_verify_rep_topology_prepare_ack(connector,
						    &msgs.recvid_list,
						    &msgs.rep_ack);
	if (ret < 0)
		return ret;

	/* Only commit the new seq_num_v once the FW has accepted the list */
	hdcp->seq_num_v = seq_num_v;
	ret = shim->write_2_2_msg(dig_port, &msgs.rep_ack,
				  sizeof(msgs.rep_ack));
	if (ret < 0)
		return ret;

	return 0;
}
1566
/* Repeater auth = topology verification, then stream management info. */
static int hdcp2_authenticate_repeater(struct intel_connector *connector)
{
	int ret = hdcp2_authenticate_repeater_topology(connector);

	if (ret < 0)
		return ret;

	return hdcp2_propagate_stream_management_info(connector);
}
1577
hdcp2_authenticate_sink(struct intel_connector * connector)1578 static int hdcp2_authenticate_sink(struct intel_connector *connector)
1579 {
1580 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1581 struct drm_i915_private *i915 = to_i915(connector->base.dev);
1582 struct intel_hdcp *hdcp = &connector->hdcp;
1583 const struct intel_hdcp_shim *shim = hdcp->shim;
1584 int ret;
1585
1586 ret = hdcp2_authentication_key_exchange(connector);
1587 if (ret < 0) {
1588 drm_dbg_kms(&i915->drm, "AKE Failed. Err : %d\n", ret);
1589 return ret;
1590 }
1591
1592 ret = hdcp2_locality_check(connector);
1593 if (ret < 0) {
1594 drm_dbg_kms(&i915->drm,
1595 "Locality Check failed. Err : %d\n", ret);
1596 return ret;
1597 }
1598
1599 ret = hdcp2_session_key_exchange(connector);
1600 if (ret < 0) {
1601 drm_dbg_kms(&i915->drm, "SKE Failed. Err : %d\n", ret);
1602 return ret;
1603 }
1604
1605 if (shim->config_stream_type) {
1606 ret = shim->config_stream_type(dig_port,
1607 hdcp->is_repeater,
1608 hdcp->content_type);
1609 if (ret < 0)
1610 return ret;
1611 }
1612
1613 if (hdcp->is_repeater) {
1614 ret = hdcp2_authenticate_repeater(connector);
1615 if (ret < 0) {
1616 drm_dbg_kms(&i915->drm,
1617 "Repeater Auth Failed. Err: %d\n", ret);
1618 return ret;
1619 }
1620 }
1621
1622 hdcp->port_data.streams[0].stream_type = hdcp->content_type;
1623 ret = hdcp2_authenticate_port(connector);
1624 if (ret < 0)
1625 return ret;
1626
1627 return ret;
1628 }
1629
/*
 * Turn on HDCP2.2 link encryption for the port. Expects the link to
 * already be authenticated (LINK_AUTH_STATUS); waits up to
 * ENCRYPT_STATUS_CHANGE_TIMEOUT_MS for the HW to report encryption on.
 */
static int hdcp2_enable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	/* Encryption must not already be on when we get here */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
		    LINK_ENCRYPTION_STATUS);
	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
						    true);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to enable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	if (intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
	    LINK_AUTH_STATUS) {
		/* Link is Authenticated. Now set for Encryption */
		intel_de_write(dev_priv,
			       HDCP2_CTL(dev_priv, cpu_transcoder, port),
			       intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) | CTL_LINK_ENCRYPTION_REQ);
	}

	/*
	 * NOTE(review): if LINK_AUTH_STATUS was not set above, this wait
	 * simply times out — confirm that is the intended failure mode.
	 */
	ret = intel_de_wait_for_set(dev_priv,
				    HDCP2_STATUS(dev_priv, cpu_transcoder,
						 port),
				    LINK_ENCRYPTION_STATUS,
				    ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);

	return ret;
}
1669
/*
 * Turn off HDCP2.2 link encryption: clear the encryption request bit, wait
 * for the HW to report encryption off, then drop HDCP signalling.
 */
static int hdcp2_disable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	/* Encryption should currently be on; warn if HW disagrees */
	drm_WARN_ON(&dev_priv->drm, !(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
				      LINK_ENCRYPTION_STATUS));

	intel_de_write(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
		       intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) & ~CTL_LINK_ENCRYPTION_REQ);

	ret = intel_de_wait_for_clear(dev_priv,
				      HDCP2_STATUS(dev_priv, cpu_transcoder,
						   port),
				      LINK_ENCRYPTION_STATUS,
				      ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	if (ret == -ETIMEDOUT)
		drm_dbg_kms(&dev_priv->drm, "Disable Encryption Timedout");

	/*
	 * NOTE(review): a -ETIMEDOUT from the wait above is overwritten if
	 * toggle_signalling() succeeds, so the timeout is only logged, not
	 * propagated — looks intentional (debug-only), but worth confirming.
	 */
	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
						    false);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to disable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	return ret;
}
1706
/*
 * Attempt full HDCP2.2 sink authentication up to three times, then enable
 * link encryption. Every failed attempt tears down the stale mei session
 * before retrying.
 */
static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	int ret, i, tries = 3;

	for (i = 0; i < tries; i++) {
		ret = hdcp2_authenticate_sink(connector);
		if (!ret)
			break;

		/* Clearing the mei hdcp session */
		drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
			    i + 1, tries, ret);
		if (hdcp2_deauthenticate_port(connector) < 0)
			drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
	}

	/* i != tries means one of the attempts succeeded */
	if (i != tries) {
		/*
		 * Ensuring the required 200mSec min time interval between
		 * Session Key Exchange and encryption.
		 */
		msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
		ret = hdcp2_enable_encryption(connector);
		if (ret < 0) {
			drm_dbg_kms(&i915->drm,
				    "Encryption Enable Failed.(%d)\n", ret);
			if (hdcp2_deauthenticate_port(connector) < 0)
				drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
		}
	}

	return ret;
}
1741
/*
 * Enable HDCP2.2 on the connector: authenticate + encrypt, then mark the
 * connector as hdcp2_encrypted. Called with hdcp->mutex held by
 * intel_hdcp2_check_link(); confirm other call sites do the same.
 */
static int _intel_hdcp2_enable(struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
		    connector->base.name, connector->base.base.id,
		    hdcp->content_type);

	ret = hdcp2_authenticate_and_encrypt(connector);
	if (ret) {
		drm_dbg_kms(&i915->drm, "HDCP2 Type%d Enabling Failed. (%d)\n",
			    hdcp->content_type, ret);
		return ret;
	}

	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is enabled. Type %d\n",
		    connector->base.name, connector->base.base.id,
		    hdcp->content_type);

	hdcp->hdcp2_encrypted = true;
	return 0;
}
1766
/*
 * Disable HDCP2.2 on the connector: stop link encryption, tear down the
 * mei session (best effort), and clear the hdcp2_encrypted flag.
 */
static int _intel_hdcp2_disable(struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	int ret;

	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n",
		    connector->base.name, connector->base.base.id);

	ret = hdcp2_disable_encryption(connector);

	/* Deauth failure is only logged; encryption is already off */
	if (hdcp2_deauthenticate_port(connector) < 0)
		drm_dbg_kms(&i915->drm, "Port deauth failed.\n");

	connector->hdcp.hdcp2_encrypted = false;

	return ret;
}
1784
1785 /* Implements the Link Integrity Check for HDCP2.2 */
/*
 * Periodic HDCP2.2 link check. On a protected link the property stays
 * ENABLED; a topology change re-verifies the repeater topology only; any
 * other failure triggers a full disable/enable reauthentication cycle.
 * Returns 0 when the link is (again) protected, a negative error otherwise.
 */
static int intel_hdcp2_check_link(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	mutex_lock(&hdcp->mutex);
	cpu_transcoder = hdcp->cpu_transcoder;

	/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp2_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	/* HW says encryption dropped even though we believe it is on */
	if (drm_WARN_ON(&dev_priv->drm,
			!intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) {
		drm_err(&dev_priv->drm,
			"HDCP2.2 link stopped the encryption, %x\n",
			intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)));
		ret = -ENXIO;
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	ret = hdcp->shim->check_2_2_link(dig_port);
	if (ret == HDCP_LINK_PROTECTED) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_ENABLED,
					true);
		}
		goto out;
	}

	if (ret == HDCP_TOPOLOGY_CHANGE) {
		if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
			goto out;

		drm_dbg_kms(&dev_priv->drm,
			    "HDCP2.2 Downstream topology change\n");
		/* Re-verify just the repeater topology, not the whole auth */
		ret = hdcp2_authenticate_repeater_topology(connector);
		if (!ret) {
			intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_ENABLED,
					true);
			goto out;
		}
		drm_dbg_kms(&dev_priv->drm,
			    "[%s:%d] Repeater topology auth failed.(%d)\n",
			    connector->base.name, connector->base.base.id,
			    ret);
	} else {
		drm_dbg_kms(&dev_priv->drm,
			    "[%s:%d] HDCP2.2 link failed, retrying auth\n",
			    connector->base.name, connector->base.base.id);
	}

	/* Full reauthentication: tear down, then bring HDCP2.2 back up */
	ret = _intel_hdcp2_disable(connector);
	if (ret) {
		drm_err(&dev_priv->drm,
			"[%s:%d] Failed to disable hdcp2.2 (%d)\n",
			connector->base.name, connector->base.base.id, ret);
		intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
		goto out;
	}

	ret = _intel_hdcp2_enable(connector);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "[%s:%d] Failed to enable hdcp2.2 (%d)\n",
			    connector->base.name, connector->base.base.id,
			    ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

out:
	mutex_unlock(&hdcp->mutex);
	return ret;
}
1876
intel_hdcp_check_work(struct work_struct * work)1877 static void intel_hdcp_check_work(struct work_struct *work)
1878 {
1879 struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
1880 struct intel_hdcp,
1881 check_work);
1882 struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
1883
1884 if (drm_connector_is_unregistered(&connector->base))
1885 return;
1886
1887 if (!intel_hdcp2_check_link(connector))
1888 schedule_delayed_work(&hdcp->check_work,
1889 DRM_HDCP2_CHECK_PERIOD_MS);
1890 else if (!intel_hdcp_check_link(connector))
1891 schedule_delayed_work(&hdcp->check_work,
1892 DRM_HDCP_CHECK_PERIOD_MS);
1893 }
1894
i915_hdcp_component_bind(struct device * i915_kdev,struct device * mei_kdev,void * data)1895 static int i915_hdcp_component_bind(struct device *i915_kdev,
1896 struct device *mei_kdev, void *data)
1897 {
1898 struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
1899
1900 drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n");
1901 mutex_lock(&dev_priv->hdcp_comp_mutex);
1902 dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data;
1903 dev_priv->hdcp_master->mei_dev = mei_kdev;
1904 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1905
1906 return 0;
1907 }
1908
/*
 * Component unbind callback: drop our reference to the mei master so the
 * hdcp2 wrappers start failing with -EINVAL instead of dereferencing it.
 */
static void i915_hdcp_component_unbind(struct device *i915_kdev,
				       struct device *mei_kdev, void *data)
{
	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);

	drm_dbg(&dev_priv->drm, "I915 HDCP comp unbind\n");
	mutex_lock(&dev_priv->hdcp_comp_mutex);
	dev_priv->hdcp_master = NULL;
	mutex_unlock(&dev_priv->hdcp_comp_mutex);
}
1919
/* Ops registered with the component framework; mei_hdcp binds against these */
static const struct component_ops i915_hdcp_component_ops = {
	.bind = i915_hdcp_component_bind,
	.unbind = i915_hdcp_component_unbind,
};
1924
/*
 * Map a display port to the ME FW's DDI index.
 *
 * NOTE(review): PORT_B..PORT_F are cast directly, which assumes the
 * enum mei_fw_ddi values for DDI B..F are numerically identical to the
 * corresponding enum port values — confirm against the mei_hdcp header.
 */
static enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
{
	switch (port) {
	case PORT_A:
		return MEI_DDI_A;
	case PORT_B ... PORT_F:
		return (enum mei_fw_ddi)port;
	default:
		return MEI_DDI_INVALID_PORT;
	}
}
1936
/*
 * Map a cpu transcoder to the ME FW's transcoder index.
 *
 * NOTE(review): the "| 0x10" presumably offsets TRANSCODER_A..D onto the
 * enum mei_fw_tc base value — confirm against the mei_hdcp header.
 */
static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
{
	switch (cpu_transcoder) {
	case TRANSCODER_A ... TRANSCODER_D:
		return (enum mei_fw_tc)(cpu_transcoder | 0x10);
	default: /* eDP, DSI TRANSCODERS are non HDCP capable */
		return MEI_INVALID_TRANSCODER;
	}
}
1946
initialize_hdcp_port_data(struct intel_connector * connector,enum port port,const struct intel_hdcp_shim * shim)1947 static int initialize_hdcp_port_data(struct intel_connector *connector,
1948 enum port port,
1949 const struct intel_hdcp_shim *shim)
1950 {
1951 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1952 struct intel_hdcp *hdcp = &connector->hdcp;
1953 struct hdcp_port_data *data = &hdcp->port_data;
1954
1955 if (INTEL_GEN(dev_priv) < 12)
1956 data->fw_ddi = intel_get_mei_fw_ddi_index(port);
1957 else
1958 /*
1959 * As per ME FW API expectation, for GEN 12+, fw_ddi is filled
1960 * with zero(INVALID PORT index).
1961 */
1962 data->fw_ddi = MEI_DDI_INVALID_PORT;
1963
1964 /*
1965 * As associated transcoder is set and modified at modeset, here fw_tc
1966 * is initialized to zero (invalid transcoder index). This will be
1967 * retained for <Gen12 forever.
1968 */
1969 data->fw_tc = MEI_INVALID_TRANSCODER;
1970
1971 data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
1972 data->protocol = (u8)shim->protocol;
1973
1974 data->k = 1;
1975 if (!data->streams)
1976 data->streams = kcalloc(data->k,
1977 sizeof(struct hdcp2_streamid_type),
1978 GFP_KERNEL);
1979 if (!data->streams) {
1980 drm_err(&dev_priv->drm, "Out of Memory\n");
1981 return -ENOMEM;
1982 }
1983
1984 data->streams[0].stream_id = 0;
1985 data->streams[0].stream_type = hdcp->content_type;
1986
1987 return 0;
1988 }
1989
is_hdcp2_supported(struct drm_i915_private * dev_priv)1990 static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
1991 {
1992 if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
1993 return false;
1994
1995 return (INTEL_GEN(dev_priv) >= 10 ||
1996 IS_GEMINILAKE(dev_priv) ||
1997 IS_KABYLAKE(dev_priv) ||
1998 IS_COFFEELAKE(dev_priv) ||
1999 IS_COMETLAKE(dev_priv));
2000 }
2001
intel_hdcp_component_init(struct drm_i915_private * dev_priv)2002 void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
2003 {
2004 int ret;
2005
2006 if (!is_hdcp2_supported(dev_priv))
2007 return;
2008
2009 mutex_lock(&dev_priv->hdcp_comp_mutex);
2010 drm_WARN_ON(&dev_priv->drm, dev_priv->hdcp_comp_added);
2011
2012 dev_priv->hdcp_comp_added = true;
2013 mutex_unlock(&dev_priv->hdcp_comp_mutex);
2014 ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
2015 I915_COMPONENT_HDCP);
2016 if (ret < 0) {
2017 drm_dbg_kms(&dev_priv->drm, "Failed at component add(%d)\n",
2018 ret);
2019 mutex_lock(&dev_priv->hdcp_comp_mutex);
2020 dev_priv->hdcp_comp_added = false;
2021 mutex_unlock(&dev_priv->hdcp_comp_mutex);
2022 return;
2023 }
2024 }
2025
intel_hdcp2_init(struct intel_connector * connector,enum port port,const struct intel_hdcp_shim * shim)2026 static void intel_hdcp2_init(struct intel_connector *connector, enum port port,
2027 const struct intel_hdcp_shim *shim)
2028 {
2029 struct drm_i915_private *i915 = to_i915(connector->base.dev);
2030 struct intel_hdcp *hdcp = &connector->hdcp;
2031 int ret;
2032
2033 ret = initialize_hdcp_port_data(connector, port, shim);
2034 if (ret) {
2035 drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
2036 return;
2037 }
2038
2039 hdcp->hdcp2_supported = true;
2040 }
2041
intel_hdcp_init(struct intel_connector * connector,enum port port,const struct intel_hdcp_shim * shim)2042 int intel_hdcp_init(struct intel_connector *connector,
2043 enum port port,
2044 const struct intel_hdcp_shim *shim)
2045 {
2046 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
2047 struct intel_hdcp *hdcp = &connector->hdcp;
2048 int ret;
2049
2050 if (!shim)
2051 return -EINVAL;
2052
2053 if (is_hdcp2_supported(dev_priv) && !connector->mst_port)
2054 intel_hdcp2_init(connector, port, shim);
2055
2056 ret =
2057 drm_connector_attach_content_protection_property(&connector->base,
2058 hdcp->hdcp2_supported);
2059 if (ret) {
2060 hdcp->hdcp2_supported = false;
2061 kfree(hdcp->port_data.streams);
2062 return ret;
2063 }
2064
2065 hdcp->shim = shim;
2066 mutex_init(&hdcp->mutex);
2067 INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
2068 INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
2069 init_waitqueue_head(&hdcp->cp_irq_queue);
2070
2071 return 0;
2072 }
2073
/*
 * Enable content protection on a connector, preferring HDCP2.2 and
 * falling back to HDCP1.4 when permitted by the requested content type.
 * On success, schedules periodic link checks and publishes the ENABLED
 * property value.
 *
 * Returns 0 on success, -ENOENT if the connector has no HDCP shim, or
 * the error from the last enable attempt (-EINVAL if none was capable).
 */
int intel_hdcp_enable(struct intel_connector *connector,
		      enum transcoder cpu_transcoder, u8 content_type)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
	int ret = -EINVAL;

	if (!hdcp->shim)
		return -ENOENT;

	/* Lock order: connector's hdcp mutex, then the port-wide one. */
	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);
	/* Enabling twice without an intervening disable is a caller bug. */
	drm_WARN_ON(&dev_priv->drm,
		    hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
	hdcp->content_type = content_type;
	hdcp->cpu_transcoder = cpu_transcoder;

	/* Gen12+ ME FW identifies the link by transcoder, not DDI. */
	if (INTEL_GEN(dev_priv) >= 12)
		hdcp->port_data.fw_tc = intel_get_mei_fw_tc(cpu_transcoder);

	/*
	 * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
	 * is capable of HDCP2.2, it is preferred to use HDCP2.2.
	 */
	if (intel_hdcp2_capable(connector)) {
		ret = _intel_hdcp2_enable(connector);
		if (!ret)
			/* HDCP2.2 links are re-checked on a shorter period. */
			check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
	}

	/*
	 * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
	 * be attempted (Type1 content must not fall back to HDCP1.4).
	 */
	if (ret && intel_hdcp_capable(connector) &&
	    hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
		ret = _intel_hdcp_enable(connector);
	}

	if (!ret) {
		/* Start periodic link integrity checks and notify userspace. */
		schedule_delayed_work(&hdcp->check_work, check_link_interval);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_ENABLED,
					true);
	}

	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
2126
/*
 * Disable content protection on a connector: mark the uapi state
 * UNDESIRED, tear down whichever encryption flavor is active, and stop
 * the periodic link check.
 *
 * Returns 0 on success (or if nothing was enabled), -ENOENT if the
 * connector has no HDCP shim, or the error from the disable path.
 */
int intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret = 0;

	if (!hdcp->shim)
		return -ENOENT;

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);

	/* Nothing to tear down if protection was never requested. */
	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		goto out;

	/* Publish UNDESIRED before actually disabling the encryption. */
	intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_UNDESIRED, false);
	if (hdcp->hdcp2_encrypted)
		ret = _intel_hdcp2_disable(connector);
	else if (hdcp->hdcp_encrypted)
		ret = _intel_hdcp_disable(connector);

out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	/*
	 * NOTE(review): the cancel is deliberately outside the locks —
	 * presumably because check_work itself takes hdcp->mutex, so a
	 * synchronous cancel under the lock could deadlock; confirm against
	 * intel_hdcp_check_work().
	 */
	cancel_delayed_work_sync(&hdcp->check_work);
	return ret;
}
2155
/*
 * Re-evaluate HDCP across an atomic commit for this connector: disable
 * when userspace asked for UNDESIRED or changed the content type, and
 * (re)enable when the state is DESIRED but not yet ENABLED.
 */
void intel_hdcp_update_pipe(struct intel_atomic_state *state,
			    struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state)
{
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	bool content_protection_type_changed, desired_and_not_enabled = false;

	if (!connector->hdcp.shim)
		return;

	/* A type change only matters while protection is still wanted. */
	content_protection_type_changed =
		(conn_state->hdcp_content_type != hdcp->content_type &&
		 conn_state->content_protection !=
		 DRM_MODE_CONTENT_PROTECTION_UNDESIRED);

	/*
	 * During the HDCP encryption session if Type change is requested,
	 * disable the HDCP and reenable it with new TYPE value.
	 */
	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
	    content_protection_type_changed)
		intel_hdcp_disable(connector);

	/*
	 * Mark the hdcp state as DESIRED after the hdcp disable of type
	 * change procedure (disable left it UNDESIRED), and notify
	 * userspace via prop_work.
	 */
	if (content_protection_type_changed) {
		mutex_lock(&hdcp->mutex);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		mutex_unlock(&hdcp->mutex);
	}

	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		mutex_lock(&hdcp->mutex);
		/* Avoid enabling hdcp, if it already ENABLED */
		desired_and_not_enabled =
			hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
		mutex_unlock(&hdcp->mutex);
	}

	/* Type changes always re-enable, with the freshly requested type. */
	if (desired_and_not_enabled || content_protection_type_changed)
		intel_hdcp_enable(connector,
				  crtc_state->cpu_transcoder,
				  (u8)conn_state->hdcp_content_type);
}
2208
intel_hdcp_component_fini(struct drm_i915_private * dev_priv)2209 void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
2210 {
2211 mutex_lock(&dev_priv->hdcp_comp_mutex);
2212 if (!dev_priv->hdcp_comp_added) {
2213 mutex_unlock(&dev_priv->hdcp_comp_mutex);
2214 return;
2215 }
2216
2217 dev_priv->hdcp_comp_added = false;
2218 mutex_unlock(&dev_priv->hdcp_comp_mutex);
2219
2220 component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
2221 }
2222
/*
 * Final HDCP teardown for a connector being destroyed: flush workers,
 * free the stream table and detach the shim. Must only be called after
 * the connector has been unregistered.
 */
void intel_hdcp_cleanup(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;

	/* Nothing was initialized (or init failed) — nothing to clean. */
	if (!hdcp->shim)
		return;

	/*
	 * If the connector is registered, it's possible userspace could kick
	 * off another HDCP enable, which would re-spawn the workers.
	 */
	drm_WARN_ON(connector->base.dev,
		connector->base.registration_state == DRM_CONNECTOR_REGISTERED);

	/*
	 * Now that the connector is not registered, check_work won't be run,
	 * but cancel any outstanding instances of it
	 */
	cancel_delayed_work_sync(&hdcp->check_work);

	/*
	 * We don't cancel prop_work in the same way as check_work since it
	 * requires connection_mutex which could be held while calling this
	 * function. Instead, we rely on the connector references grabbed before
	 * scheduling prop_work to ensure the connector is alive when prop_work
	 * is run. So if we're in the destroy path (which is where this
	 * function should be called), we're "guaranteed" that prop_work is not
	 * active (tl;dr This Should Never Happen).
	 */
	drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work));

	/* Clearing ->shim under the mutex marks HDCP as torn down. */
	mutex_lock(&hdcp->mutex);
	kfree(hdcp->port_data.streams);
	hdcp->shim = NULL;
	mutex_unlock(&hdcp->mutex);
}
2259
/*
 * Atomic-check hook for the content protection property: normalizes the
 * requested state across disables and modesets, and forces a modeset when
 * the protection state or content type actually changes.
 */
void intel_hdcp_atomic_check(struct drm_connector *connector,
			     struct drm_connector_state *old_state,
			     struct drm_connector_state *new_state)
{
	u64 old_cp = old_state->content_protection;
	u64 new_cp = new_state->content_protection;
	struct drm_crtc_state *crtc_state;

	if (!new_state->crtc) {
		/*
		 * If the connector is being disabled with CP enabled, mark it
		 * desired so it's re-enabled when the connector is brought back
		 */
		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			new_state->content_protection =
				DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return;
	}

	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
						   new_state->crtc);
	/*
	 * Fix the HDCP uapi content protection state in case of modeset:
	 * a modeset drops encryption, so ENABLED degrades to DESIRED unless
	 * userspace explicitly asked for UNDESIRED.
	 * FIXME: As per HDCP content protection property uapi doc, an uevent()
	 * need to be sent if there is transition from ENABLED->DESIRED.
	 */
	if (drm_atomic_crtc_needs_modeset(crtc_state) &&
	    (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	     new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
		new_state->content_protection =
			DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/*
	 * Nothing to do if the state didn't change, or HDCP was activated since
	 * the last commit (DESIRED -> ENABLED is kernel-driven, not a request).
	 * And also no change in hdcp content type.
	 */
	if (old_cp == new_cp ||
	    (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	     new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
		if (old_state->hdcp_content_type ==
				new_state->hdcp_content_type)
			return;
	}

	/* Anything else is a real request: force a modeset to act on it. */
	crtc_state->mode_changed = true;
}
2306
2307 /* Handles the CP_IRQ raised from the DP HDCP sink */
intel_hdcp_handle_cp_irq(struct intel_connector * connector)2308 void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
2309 {
2310 struct intel_hdcp *hdcp = &connector->hdcp;
2311
2312 if (!hdcp->shim)
2313 return;
2314
2315 atomic_inc(&connector->hdcp.cp_irq_count);
2316 wake_up_all(&connector->hdcp.cp_irq_queue);
2317
2318 schedule_delayed_work(&hdcp->check_work, 0);
2319 }
2320