xref: /linux/drivers/gpu/drm/i915/display/intel_hdcp.c (revision 84b9b44b)
1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright (C) 2017 Google, Inc.
 * Copyright (C) 2017-2019, Intel Corporation.
5  *
6  * Authors:
7  * Sean Paul <seanpaul@chromium.org>
8  * Ramalingam C <ramalingam.c@intel.com>
9  */
10 
11 #include <linux/component.h>
12 #include <linux/i2c.h>
13 #include <linux/random.h>
14 
15 #include <drm/display/drm_hdcp_helper.h>
16 #include <drm/i915_component.h>
17 
18 #include "i915_drv.h"
19 #include "i915_reg.h"
20 #include "intel_connector.h"
21 #include "intel_de.h"
22 #include "intel_display_power.h"
23 #include "intel_display_power_well.h"
24 #include "intel_display_types.h"
25 #include "intel_hdcp.h"
26 #include "intel_hdcp_gsc.h"
27 #include "intel_hdcp_regs.h"
28 #include "intel_pcode.h"
29 
30 #define KEY_LOAD_TRIES	5
31 #define HDCP2_LC_RETRY_CNT			3
32 
33 static int intel_conn_to_vcpi(struct intel_connector *connector)
34 {
35 	struct drm_dp_mst_topology_mgr *mgr;
36 	struct drm_dp_mst_atomic_payload *payload;
37 	struct drm_dp_mst_topology_state *mst_state;
38 	int vcpi = 0;
39 
40 	/* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
41 	if (!connector->port)
42 		return 0;
43 	mgr = connector->port->mgr;
44 
45 	drm_modeset_lock(&mgr->base.lock, NULL);
46 	mst_state = to_drm_dp_mst_topology_state(mgr->base.state);
47 	payload = drm_atomic_get_mst_payload_state(mst_state, connector->port);
48 	if (drm_WARN_ON(mgr->dev, !payload))
49 		goto out;
50 
51 	vcpi = payload->vcpi;
52 	if (drm_WARN_ON(mgr->dev, vcpi < 0)) {
53 		vcpi = 0;
54 		goto out;
55 	}
56 out:
57 	drm_modeset_unlock(&mgr->base.lock);
58 	return vcpi;
59 }
60 
/*
 * intel_hdcp_required_content_stream selects the highest common possible HDCP
 * content_type for all streams in a DP MST topology because the security f/w
 * doesn't have any provision to mark the content_type for each stream
 * separately; it marks all available streams with the content_type provided
 * at the time of port authentication. This may prohibit userspace from using
 * type1 content on an HDCP 2.2 capable sink when other sinks in the DP MST
 * topology are not capable of HDCP 2.2. Though it is not compulsory, the
 * security fw should change its policy to mark different content_types for
 * different streams.
 */
static int
intel_hdcp_required_content_stream(struct intel_digital_port *dig_port)
{
	struct drm_connector_list_iter conn_iter;
	struct intel_digital_port *conn_dig_port;
	struct intel_connector *connector;
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	bool enforce_type0 = false;
	int k;

	data->k = 0;

	/* Port already authenticated: the stream list is already in place. */
	if (dig_port->hdcp_auth_status)
		return 0;

	/*
	 * Walk all connectors and record the stream id (VCPI) of every
	 * connected MST stream that terminates on this digital port.
	 */
	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->base.status == connector_status_disconnected)
			continue;

		if (!intel_encoder_is_mst(intel_attached_encoder(connector)))
			continue;

		conn_dig_port = intel_attached_dig_port(connector);
		if (conn_dig_port != dig_port)
			continue;

		/* One type1-incapable sink downgrades the whole topology. */
		if (!enforce_type0 && !dig_port->hdcp_mst_type1_capable)
			enforce_type0 = true;

		data->streams[data->k].stream_id = intel_conn_to_vcpi(connector);
		data->k++;

		/* if there is only one active stream */
		if (dig_port->dp.active_mst_links <= 1)
			break;
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Sanity: stream count must be 1..num-pipes. */
	if (drm_WARN_ON(&i915->drm, data->k > INTEL_NUM_PIPES(i915) || data->k == 0))
		return -EINVAL;

	/*
	 * Apply common protection level across all streams in DP MST Topology.
	 * Use highest supported content type for all streams in DP MST Topology.
	 */
	for (k = 0; k < data->k; k++)
		data->streams[k].stream_type =
			enforce_type0 ? DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1;

	return 0;
}
124 
125 static int intel_hdcp_prepare_streams(struct intel_connector *connector)
126 {
127 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
128 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
129 	struct intel_hdcp *hdcp = &connector->hdcp;
130 	int ret;
131 
132 	if (!intel_encoder_is_mst(intel_attached_encoder(connector))) {
133 		data->k = 1;
134 		data->streams[0].stream_type = hdcp->content_type;
135 	} else {
136 		ret = intel_hdcp_required_content_stream(dig_port);
137 		if (ret)
138 			return ret;
139 	}
140 
141 	return 0;
142 }
143 
144 static
145 bool intel_hdcp_is_ksv_valid(u8 *ksv)
146 {
147 	int i, ones = 0;
148 	/* KSV has 20 1's and 20 0's */
149 	for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
150 		ones += hweight8(ksv[i]);
151 	if (ones != 20)
152 		return false;
153 
154 	return true;
155 }
156 
157 static
158 int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
159 			       const struct intel_hdcp_shim *shim, u8 *bksv)
160 {
161 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
162 	int ret, i, tries = 2;
163 
164 	/* HDCP spec states that we must retry the bksv if it is invalid */
165 	for (i = 0; i < tries; i++) {
166 		ret = shim->read_bksv(dig_port, bksv);
167 		if (ret)
168 			return ret;
169 		if (intel_hdcp_is_ksv_valid(bksv))
170 			break;
171 	}
172 	if (i == tries) {
173 		drm_dbg_kms(&i915->drm, "Bksv is invalid\n");
174 		return -ENODEV;
175 	}
176 
177 	return 0;
178 }
179 
180 /* Is HDCP1.4 capable on Platform and Sink */
181 bool intel_hdcp_capable(struct intel_connector *connector)
182 {
183 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
184 	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
185 	bool capable = false;
186 	u8 bksv[5];
187 
188 	if (!shim)
189 		return capable;
190 
191 	if (shim->hdcp_capable) {
192 		shim->hdcp_capable(dig_port, &capable);
193 	} else {
194 		if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv))
195 			capable = true;
196 	}
197 
198 	return capable;
199 }
200 
201 /* Is HDCP2.2 capable on Platform and Sink */
202 bool intel_hdcp2_capable(struct intel_connector *connector)
203 {
204 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
205 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
206 	struct intel_hdcp *hdcp = &connector->hdcp;
207 	bool capable = false;
208 
209 	/* I915 support for HDCP2.2 */
210 	if (!hdcp->hdcp2_supported)
211 		return false;
212 
213 	/* If MTL+ make sure gsc is loaded and proxy is setup */
214 	if (intel_hdcp_gsc_cs_required(dev_priv)) {
215 		struct intel_gt *gt = dev_priv->media_gt;
216 		struct intel_gsc_uc *gsc = gt ? &gt->uc.gsc : NULL;
217 
218 		if (!gsc || !intel_uc_fw_is_running(&gsc->fw))
219 			return false;
220 	}
221 
222 	/* MEI/GSC interface is solid depending on which is used */
223 	mutex_lock(&dev_priv->display.hdcp.comp_mutex);
224 	if (!dev_priv->display.hdcp.comp_added ||  !dev_priv->display.hdcp.master) {
225 		mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
226 		return false;
227 	}
228 	mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
229 
230 	/* Sink's capability for HDCP2.2 */
231 	hdcp->shim->hdcp_2_2_capable(dig_port, &capable);
232 
233 	return capable;
234 }
235 
236 static bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
237 			      enum transcoder cpu_transcoder, enum port port)
238 {
239 	return intel_de_read(dev_priv,
240 	                     HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
241 	       HDCP_STATUS_ENC;
242 }
243 
244 static bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
245 			       enum transcoder cpu_transcoder, enum port port)
246 {
247 	return intel_de_read(dev_priv,
248 	                     HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
249 	       LINK_ENCRYPTION_STATUS;
250 }
251 
/*
 * Wait for the repeater to signal that its downstream KSV list is ready.
 * Returns 0 on success, the shim's read error, or -ETIMEDOUT.
 */
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
				    const struct intel_hdcp_shim *shim)
{
	int ret, read_ret;
	bool ksv_ready;

	/* Poll for ksv list ready (spec says max time allowed is 5s) */
	ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port,
							 &ksv_ready),
			 read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
			 100 * 1000);
	if (ret)
		return ret;
	/* Propagate a read failure in preference to the timeout. */
	if (read_ret)
		return read_ret;
	/* Polling ended without the ready bit: treat as a timeout. */
	if (!ksv_ready)
		return -ETIMEDOUT;

	return 0;
}
272 
273 static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
274 {
275 	enum i915_power_well_id id;
276 	intel_wakeref_t wakeref;
277 	bool enabled = false;
278 
279 	/*
280 	 * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
281 	 * On all BXT+, SW can load the keys only when the PW#1 is turned on.
282 	 */
283 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
284 		id = HSW_DISP_PW_GLOBAL;
285 	else
286 		id = SKL_DISP_PW_1;
287 
288 	/* PG1 (power well #1) needs to be enabled */
289 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
290 		enabled = intel_display_power_well_is_enabled(dev_priv, id);
291 
292 	/*
293 	 * Another req for hdcp key loadability is enabled state of pll for
294 	 * cdclk. Without active crtc we wont land here. So we are assuming that
295 	 * cdclk is already on.
296 	 */
297 
298 	return enabled;
299 }
300 
301 static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
302 {
303 	intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
304 	intel_de_write(dev_priv, HDCP_KEY_STATUS,
305 		       HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
306 }
307 
/*
 * Load the HDCP1.4 keys into the display HW and hand Aksv to the PCH.
 * Returns 0 when the keys report loaded and valid, negative errno otherwise.
 */
static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 val;

	/* Nothing to do if the keys are already loaded and valid. */
	val = intel_de_read(dev_priv, HDCP_KEY_STATUS);
	if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
		return 0;

	/*
	 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
	 * out of reset. So if Key is not already loaded, its an error state.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		if (!(intel_de_read(dev_priv, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
			return -ENXIO;

	/*
	 * Initiate loading the HDCP key from fuses.
	 *
	 * BXT+ platforms, HDCP key needs to be loaded by SW. Only display
	 * version 9 platforms (minus BXT) differ in the key load trigger
	 * process from other platforms. These platforms use the GT Driver
	 * Mailbox interface.
	 */
	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) {
		ret = snb_pcode_write(&dev_priv->uncore, SKL_PCODE_LOAD_HDCP_KEYS, 1);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to initiate HDCP key load (%d)\n",
				ret);
			return ret;
		}
	} else {
		intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
	}

	/* Wait for the keys to load (500us) */
	ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS,
					HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
					10, 1, &val);
	if (ret)
		return ret;
	else if (!(val & HDCP_KEY_LOAD_STATUS))
		/* Load completed but the keys are reported invalid. */
		return -ENXIO;

	/* Send Aksv over to PCH display for use in authentication */
	intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);

	return 0;
}
359 
360 /* Returns updated SHA-1 index */
361 static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
362 {
363 	intel_de_write(dev_priv, HDCP_SHA_TEXT, sha_text);
364 	if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
365 		drm_err(&dev_priv->drm, "Timed out waiting for SHA1 ready\n");
366 		return -ETIMEDOUT;
367 	}
368 	return 0;
369 }
370 
/*
 * Return the HDCP_REP_CTL repeater-present/SHA1-M0 bits for the given
 * transcoder (display version >= 12) or port (older platforms).
 *
 * NOTE(review): on an unknown transcoder/port this returns -EINVAL through a
 * u32 return type; callers appear to use the value as a register mask —
 * verify the error case is acceptable at each call site.
 */
static
u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv,
				enum transcoder cpu_transcoder, enum port port)
{
	/* Display 12+ selects by transcoder rather than by DDI port. */
	if (DISPLAY_VER(dev_priv) >= 12) {
		switch (cpu_transcoder) {
		case TRANSCODER_A:
			return HDCP_TRANSA_REP_PRESENT |
			       HDCP_TRANSA_SHA1_M0;
		case TRANSCODER_B:
			return HDCP_TRANSB_REP_PRESENT |
			       HDCP_TRANSB_SHA1_M0;
		case TRANSCODER_C:
			return HDCP_TRANSC_REP_PRESENT |
			       HDCP_TRANSC_SHA1_M0;
		case TRANSCODER_D:
			return HDCP_TRANSD_REP_PRESENT |
			       HDCP_TRANSD_SHA1_M0;
		default:
			drm_err(&dev_priv->drm, "Unknown transcoder %d\n",
				cpu_transcoder);
			return -EINVAL;
		}
	}

	switch (port) {
	case PORT_A:
		return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
	case PORT_B:
		return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
	case PORT_C:
		return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
	case PORT_D:
		return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
	case PORT_E:
		return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
	default:
		drm_err(&dev_priv->drm, "Unknown port %d\n", port);
		return -EINVAL;
	}
}
412 
/*
 * Stream the downstream KSV list, BINFO/BSTATUS and M0 through the HW SHA-1
 * engine and let the HW compare the digest against the V' parts read from
 * the repeater (HDCP 1.x, second part of authentication). Returns 0 on a
 * V match, negative errno on read failure, timeout or mismatch.
 */
static
int intel_hdcp_validate_v_prime(struct intel_connector *connector,
				const struct intel_hdcp_shim *shim,
				u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	u32 vprime, sha_text, sha_leftovers, rep_ctl;
	int ret, i, j, sha_idx;

	/* Process V' values from the receiver */
	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
		ret = shim->read_v_prime_part(dig_port, i, &vprime);
		if (ret)
			return ret;
		intel_de_write(dev_priv, HDCP_SHA_V_PRIME(i), vprime);
	}

	/*
	 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
	 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
	 * stream is written via the HDCP_SHA_TEXT register in 32-bit
	 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
	 * index will keep track of our progress through the 64 bytes as well as
	 * helping us work the 40-bit KSVs through our 32-bit register.
	 *
	 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
	 */
	sha_idx = 0;
	sha_text = 0;
	sha_leftovers = 0;
	rep_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port);
	intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	for (i = 0; i < num_downstream; i++) {
		unsigned int sha_empty;
		u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];

		/* Fill up the empty slots in sha_text and write it out */
		sha_empty = sizeof(sha_text) - sha_leftovers;
		for (j = 0; j < sha_empty; j++) {
			u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8);
			sha_text |= ksv[j] << off;
		}

		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;

		/* Programming guide writes this every 64 bytes */
		sha_idx += sizeof(sha_text);
		if (!(sha_idx % 64))
			intel_de_write(dev_priv, HDCP_REP_CTL,
				       rep_ctl | HDCP_SHA1_TEXT_32);

		/* Store the leftover bytes from the ksv in sha_text */
		sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
		sha_text = 0;
		for (j = 0; j < sha_leftovers; j++)
			sha_text |= ksv[sha_empty + j] <<
					((sizeof(sha_text) - j - 1) * 8);

		/*
		 * If we still have room in sha_text for more data, continue.
		 * Otherwise, write it out immediately.
		 */
		if (sizeof(sha_text) > sha_leftovers)
			continue;

		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_leftovers = 0;
		sha_text = 0;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
	 * bytes are leftover from the last ksv, we might be able to fit them
	 * all in sha_text (first 2 cases), or we might need to split them up
	 * into 2 writes (last 2 cases).
	 */
	if (sha_leftovers == 0) {
		/* Write 16 bits of text, 16 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(dev_priv,
					   bstatus[0] << 8 | bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 16 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 1) {
		/* Write 24 bits of text, 8 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
		/* Only 24-bits of data, must be in the LSB */
		sha_text = (sha_text & 0xffffff00) >> 8;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 24 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 2) {
		/* Write 32 bits of text */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0] << 8 | bstatus[1];
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 64 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		for (i = 0; i < 2; i++) {
			ret = intel_write_sha_text(dev_priv, 0);
			if (ret < 0)
				return ret;
			sha_idx += sizeof(sha_text);
		}

		/*
		 * Terminate the SHA-1 stream by hand. For the other leftover
		 * cases this is appended by the hardware.
		 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text = DRM_HDCP_SHA1_TERMINATOR << 24;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else if (sha_leftovers == 3) {
		/* Write 32 bits of text (filled from LSB) */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0];
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of text (filled from LSB), 24 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(dev_priv, bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else {
		drm_dbg_kms(&dev_priv->drm, "Invalid number of leftovers %d\n",
			    sha_leftovers);
		return -EINVAL;
	}

	intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
	while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * Last write gets the length of the concatenation in bits. That is:
	 *  - 5 bytes per device
	 *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
	 */
	sha_text = (num_downstream * 5 + 10) * 8;
	ret = intel_write_sha_text(dev_priv, sha_text);
	if (ret < 0)
		return ret;

	/* Tell the HW we're done with the hash and wait for it to ACK */
	intel_de_write(dev_priv, HDCP_REP_CTL,
		       rep_ctl | HDCP_SHA1_COMPLETE_HASH);
	if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL,
				  HDCP_SHA1_COMPLETE, 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for SHA1 complete\n");
		return -ETIMEDOUT;
	}
	/* HW sets V_MATCH when the computed digest equals the V' parts. */
	if (!(intel_de_read(dev_priv, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
		drm_dbg_kms(&dev_priv->drm, "SHA-1 mismatch, HDCP failed\n");
		return -ENXIO;
	}

	return 0;
}
655 
/* Implements Part 2 of the HDCP authorization procedure */
static
int intel_hdcp_auth_downstream(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
	u8 bstatus[2], num_downstream, *ksv_fifo;
	int ret, i, tries = 3;

	/* Wait for the repeater to assemble its downstream KSV list. */
	ret = intel_hdcp_poll_ksv_fifo(dig_port, shim);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "KSV list failed to become ready (%d)\n", ret);
		return ret;
	}

	ret = shim->read_bstatus(dig_port, bstatus);
	if (ret)
		return ret;

	/* Reject topologies exceeding the spec's device/cascade limits. */
	if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
	    DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
		drm_dbg_kms(&dev_priv->drm, "Max Topology Limit Exceeded\n");
		return -EPERM;
	}

	/*
	 * When repeater reports 0 device count, HDCP1.4 spec allows disabling
	 * the HDCP encryption. That implies that repeater can't have its own
	 * display. As there is no consumption of encrypted content in the
	 * repeater with 0 downstream devices, we are failing the
	 * authentication.
	 */
	num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
	if (num_downstream == 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Repeater with zero downstream devices\n");
		return -EINVAL;
	}

	ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
	if (!ksv_fifo) {
		drm_dbg_kms(&dev_priv->drm, "Out of mem: ksv_fifo\n");
		return -ENOMEM;
	}

	ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo);
	if (ret)
		goto err;

	/* Fail authentication if any downstream KSV is on the SRM. */
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, ksv_fifo,
					num_downstream) > 0) {
		drm_err(&dev_priv->drm, "Revoked Ksv(s) in ksv_fifo\n");
		ret = -EPERM;
		goto err;
	}

	/*
	 * When V prime mismatches, DP Spec mandates re-read of
	 * V prime atleast twice.
	 */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_validate_v_prime(connector, shim,
						  ksv_fifo, num_downstream,
						  bstatus);
		if (!ret)
			break;
	}

	if (i == tries) {
		drm_dbg_kms(&dev_priv->drm,
			    "V Prime validation failed.(%d)\n", ret);
		goto err;
	}

	drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (%d downstream devices)\n",
		    num_downstream);
	ret = 0;
err:
	kfree(ksv_fifo);
	return ret;
}
739 
/* Implements Part 1 of the HDCP authorization procedure */
static int intel_hdcp_auth(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	unsigned long r0_prime_gen_start;
	int ret, i, tries = 2;
	/* Unions view the same bytes as register words and as shim buffers. */
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_AN_LEN];
	} an;
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_KSV_LEN];
	} bksv;
	union {
		u32 reg;
		u8 shim[DRM_HDCP_RI_LEN];
	} ri;
	bool repeater_present, hdcp_capable;

	/*
	 * Detects whether the display is HDCP capable. Although we check for
	 * valid Bksv below, the HDCP over DP spec requires that we check
	 * whether the display supports HDCP before we write An. For HDMI
	 * displays, this is not necessary.
	 */
	if (shim->hdcp_capable) {
		ret = shim->hdcp_capable(dig_port, &hdcp_capable);
		if (ret)
			return ret;
		if (!hdcp_capable) {
			drm_dbg_kms(&dev_priv->drm,
				    "Panel is not HDCP capable\n");
			return -EINVAL;
		}
	}

	/* Initialize An with 2 random values and acquire it */
	for (i = 0; i < 2; i++)
		intel_de_write(dev_priv,
			       HDCP_ANINIT(dev_priv, cpu_transcoder, port),
			       get_random_u32());
	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
		       HDCP_CONF_CAPTURE_AN);

	/* Wait for An to be acquired */
	if (intel_de_wait_for_set(dev_priv,
				  HDCP_STATUS(dev_priv, cpu_transcoder, port),
				  HDCP_STATUS_AN_READY, 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for An\n");
		return -ETIMEDOUT;
	}

	/* Read back the HW-generated An and send it (with Aksv) to the sink. */
	an.reg[0] = intel_de_read(dev_priv,
				  HDCP_ANLO(dev_priv, cpu_transcoder, port));
	an.reg[1] = intel_de_read(dev_priv,
				  HDCP_ANHI(dev_priv, cpu_transcoder, port));
	ret = shim->write_an_aksv(dig_port, an.shim);
	if (ret)
		return ret;

	/* The R0' wait window below is measured from this point. */
	r0_prime_gen_start = jiffies;

	memset(&bksv, 0, sizeof(bksv));

	ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim);
	if (ret < 0)
		return ret;

	/* Fail authentication if the sink's Bksv is on the SRM. */
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1) > 0) {
		drm_err(&dev_priv->drm, "BKSV is revoked\n");
		return -EPERM;
	}

	intel_de_write(dev_priv, HDCP_BKSVLO(dev_priv, cpu_transcoder, port),
		       bksv.reg[0]);
	intel_de_write(dev_priv, HDCP_BKSVHI(dev_priv, cpu_transcoder, port),
		       bksv.reg[1]);

	ret = shim->repeater_present(dig_port, &repeater_present);
	if (ret)
		return ret;
	if (repeater_present)
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port));

	ret = shim->toggle_signalling(dig_port, cpu_transcoder, true);
	if (ret)
		return ret;

	/* Kick off HW authentication and encryption. */
	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
		       HDCP_CONF_AUTH_AND_ENC);

	/* Wait for R0 ready */
	if (wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
		     (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for R0 ready\n");
		return -ETIMEDOUT;
	}

	/*
	 * Wait for R0' to become available. The spec says 100ms from Aksv, but
	 * some monitors can take longer than this. We'll set the timeout at
	 * 300ms just to be sure.
	 *
	 * On DP, there's an R0_READY bit available but no such bit
	 * exists on HDMI. Since the upper-bound is the same, we'll just do
	 * the stupid thing instead of polling on one and not the other.
	 */
	wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);

	tries = 3;

	/*
	 * DP HDCP Spec mandates the two more reattempt to read R0, incase
	 * of R0 mismatch.
	 */
	for (i = 0; i < tries; i++) {
		ri.reg = 0;
		ret = shim->read_ri_prime(dig_port, ri.shim);
		if (ret)
			return ret;
		intel_de_write(dev_priv,
			       HDCP_RPRIME(dev_priv, cpu_transcoder, port),
			       ri.reg);

		/* Wait for Ri prime match */
		if (!wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
			      (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
			break;
	}

	if (i == tries) {
		drm_dbg_kms(&dev_priv->drm,
			    "Timed out waiting for Ri prime match (%x)\n",
			    intel_de_read(dev_priv, HDCP_STATUS(dev_priv,
					  cpu_transcoder, port)));
		return -ETIMEDOUT;
	}

	/* Wait for encryption confirmation */
	if (intel_de_wait_for_set(dev_priv,
				  HDCP_STATUS(dev_priv, cpu_transcoder, port),
				  HDCP_STATUS_ENC,
				  HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(&dev_priv->drm, "Timed out waiting for encryption\n");
		return -ETIMEDOUT;
	}

	/* DP MST Auth Part 1 Step 2.a and Step 2.b */
	if (shim->stream_encryption) {
		ret = shim->stream_encryption(connector, true);
		if (ret) {
			drm_err(&dev_priv->drm, "[%s:%d] Failed to enable HDCP 1.4 stream enc\n",
				connector->base.name, connector->base.base.id);
			return ret;
		}
		drm_dbg_kms(&dev_priv->drm, "HDCP 1.4 transcoder: %s stream encrypted\n",
			    transcoder_name(hdcp->stream_transcoder));
	}

	/* Repeaters continue with Part 2 (downstream KSV validation). */
	if (repeater_present)
		return intel_hdcp_auth_downstream(connector);

	drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (no repeater present)\n");
	return 0;
}
912 
/*
 * Tear down HDCP1.4: disable stream encryption (MST), then port encryption,
 * repeater bits and signalling. Returns 0 on success, negative errno on
 * failure or timeout.
 */
static int _intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	u32 repeater_ctl;
	int ret;

	drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n",
		    connector->base.name, connector->base.base.id);

	if (hdcp->shim->stream_encryption) {
		ret = hdcp->shim->stream_encryption(connector, false);
		if (ret) {
			drm_err(&dev_priv->drm, "[%s:%d] Failed to disable HDCP 1.4 stream enc\n",
				connector->base.name, connector->base.base.id);
			return ret;
		}
		drm_dbg_kms(&dev_priv->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n",
			    transcoder_name(hdcp->stream_transcoder));
		/*
		 * If there are other connectors on this port using HDCP,
		 * don't disable it until it disabled HDCP encryption for
		 * all connectors in MST topology.
		 */
		if (dig_port->num_hdcp_streams > 0)
			return 0;
	}

	/* Disable port-level encryption and wait for HW status to clear. */
	hdcp->hdcp_encrypted = false;
	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
	if (intel_de_wait_for_clear(dev_priv,
				    HDCP_STATUS(dev_priv, cpu_transcoder, port),
				    ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(&dev_priv->drm,
			"Failed to disable HDCP, timeout clearing status\n");
		return -ETIMEDOUT;
	}

	/* Clear this port's repeater-present/SHA1-M0 bits in HDCP_REP_CTL. */
	repeater_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder,
						   port);
	intel_de_rmw(dev_priv, HDCP_REP_CTL, repeater_ctl, 0);

	ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false);
	if (ret) {
		drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n");
		return ret;
	}

	drm_dbg_kms(&dev_priv->drm, "HDCP is disabled\n");
	return 0;
}
967 
/*
 * Bring up HDCP 1.4 on the port: load the HDCP keys (retrying up to
 * KEY_LOAD_TRIES times, clearing partially loaded keys between tries)
 * and then run authentication, retrying up to 3 times as the HDCP spec
 * expects on failure.
 *
 * Returns 0 on success (hdcp_encrypted is set), -ENXIO when the key
 * hardware is not in a loadable state, or the last key-load/auth error.
 */
static int _intel_hdcp_enable(struct intel_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int i, ret, tries = 3;

	drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being enabled...\n",
		    connector->base.name, connector->base.base.id);

	if (!hdcp_key_loadable(dev_priv)) {
		drm_err(&dev_priv->drm, "HDCP key Load is not possible\n");
		return -ENXIO;
	}

	for (i = 0; i < KEY_LOAD_TRIES; i++) {
		ret = intel_hdcp_load_keys(dev_priv);
		if (!ret)
			break;
		intel_hdcp_clear_keys(dev_priv);
	}
	if (ret) {
		drm_err(&dev_priv->drm, "Could not load HDCP keys, (%d)\n",
			ret);
		return ret;
	}

	/* In case of authentication failures, HDCP spec expects reauth. */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_auth(connector);
		if (!ret) {
			hdcp->hdcp_encrypted = true;
			return 0;
		}

		drm_dbg_kms(&dev_priv->drm, "HDCP Auth failure (%d)\n", ret);

		/* Ensuring HDCP encryption and signalling are stopped. */
		_intel_hdcp_disable(connector);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "HDCP authentication failed (%d tries/%d)\n", tries, ret);
	return ret;
}
1012 
1013 static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
1014 {
1015 	return container_of(hdcp, struct intel_connector, hdcp);
1016 }
1017 
1018 static void intel_hdcp_update_value(struct intel_connector *connector,
1019 				    u64 value, bool update_property)
1020 {
1021 	struct drm_device *dev = connector->base.dev;
1022 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1023 	struct intel_hdcp *hdcp = &connector->hdcp;
1024 
1025 	drm_WARN_ON(connector->base.dev, !mutex_is_locked(&hdcp->mutex));
1026 
1027 	if (hdcp->value == value)
1028 		return;
1029 
1030 	drm_WARN_ON(dev, !mutex_is_locked(&dig_port->hdcp_mutex));
1031 
1032 	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
1033 		if (!drm_WARN_ON(dev, dig_port->num_hdcp_streams == 0))
1034 			dig_port->num_hdcp_streams--;
1035 	} else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
1036 		dig_port->num_hdcp_streams++;
1037 	}
1038 
1039 	hdcp->value = value;
1040 	if (update_property) {
1041 		drm_connector_get(&connector->base);
1042 		schedule_work(&hdcp->prop_work);
1043 	}
1044 }
1045 
/*
 * Implements Part 3 of the HDCP authorization procedure: periodic link
 * integrity check for an active HDCP 1.4 session.  On a failed check
 * the link is torn down and re-authenticated; on any unrecoverable
 * failure the property is flipped back to DESIRED so userspace sees
 * the loss of protection.
 *
 * Takes hdcp->mutex then dig_port->hdcp_mutex (in that order).
 */
static int intel_hdcp_check_link(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);

	cpu_transcoder = hdcp->cpu_transcoder;

	/* Check_link valid only when HDCP1.4 is enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	/* Hardware stopped encrypting behind our back: report and bail. */
	if (drm_WARN_ON(&dev_priv->drm,
			!intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) {
		drm_err(&dev_priv->drm,
			"%s:%d HDCP link stopped encryption,%x\n",
			connector->base.name, connector->base.base.id,
			intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)));
		ret = -ENXIO;
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	/* Shim reports the link healthy: nothing to do. */
	if (hdcp->shim->check_link(dig_port, connector)) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_ENABLED, true);
		}
		goto out;
	}

	drm_dbg_kms(&dev_priv->drm,
		    "[%s:%d] HDCP link failed, retrying authentication\n",
		    connector->base.name, connector->base.base.id);

	/* Full teardown + re-enable is the HDCP 1.4 recovery path. */
	ret = _intel_hdcp_disable(connector);
	if (ret) {
		drm_err(&dev_priv->drm, "Failed to disable hdcp (%d)\n", ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	ret = _intel_hdcp_enable(connector);
	if (ret) {
		drm_err(&dev_priv->drm, "Failed to enable hdcp (%d)\n", ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
1116 
/*
 * Deferred update of the connector's content-protection property,
 * performed under connection_mutex and hdcp->mutex.  Always drops the
 * connector reference taken by intel_hdcp_update_value() when the work
 * was scheduled.
 */
static void intel_hdcp_prop_work(struct work_struct *work)
{
	struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
					       prop_work);
	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);

	drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, NULL);
	mutex_lock(&hdcp->mutex);

	/*
	 * This worker is only used to flip between ENABLED/DESIRED. Either of
	 * those to UNDESIRED is handled by core. If value == UNDESIRED,
	 * we're running just after hdcp has been disabled, so just exit
	 */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		drm_hdcp_update_content_protection(&connector->base,
						   hdcp->value);

	mutex_unlock(&hdcp->mutex);
	drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex);

	/* Pairs with drm_connector_get() in intel_hdcp_update_value(). */
	drm_connector_put(&connector->base);
}
1141 
1142 bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
1143 {
1144 	return RUNTIME_INFO(dev_priv)->has_hdcp &&
1145 		(DISPLAY_VER(dev_priv) >= 12 || port < PORT_E);
1146 }
1147 
1148 static int
1149 hdcp2_prepare_ake_init(struct intel_connector *connector,
1150 		       struct hdcp2_ake_init *ake_data)
1151 {
1152 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1153 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1154 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1155 	struct i915_hdcp_master *arbiter;
1156 	int ret;
1157 
1158 	mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1159 	arbiter = dev_priv->display.hdcp.master;
1160 
1161 	if (!arbiter || !arbiter->ops) {
1162 		mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1163 		return -EINVAL;
1164 	}
1165 
1166 	ret = arbiter->ops->initiate_hdcp2_session(arbiter->hdcp_dev, data, ake_data);
1167 	if (ret)
1168 		drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. %d\n",
1169 			    ret);
1170 	mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1171 
1172 	return ret;
1173 }
1174 
1175 static int
1176 hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
1177 				struct hdcp2_ake_send_cert *rx_cert,
1178 				bool *paired,
1179 				struct hdcp2_ake_no_stored_km *ek_pub_km,
1180 				size_t *msg_sz)
1181 {
1182 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1183 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1184 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1185 	struct i915_hdcp_master *arbiter;
1186 	int ret;
1187 
1188 	mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1189 	arbiter = dev_priv->display.hdcp.master;
1190 
1191 	if (!arbiter || !arbiter->ops) {
1192 		mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1193 		return -EINVAL;
1194 	}
1195 
1196 	ret = arbiter->ops->verify_receiver_cert_prepare_km(arbiter->hdcp_dev, data,
1197 							 rx_cert, paired,
1198 							 ek_pub_km, msg_sz);
1199 	if (ret < 0)
1200 		drm_dbg_kms(&dev_priv->drm, "Verify rx_cert failed. %d\n",
1201 			    ret);
1202 	mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1203 
1204 	return ret;
1205 }
1206 
1207 static int hdcp2_verify_hprime(struct intel_connector *connector,
1208 			       struct hdcp2_ake_send_hprime *rx_hprime)
1209 {
1210 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1211 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1212 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1213 	struct i915_hdcp_master *arbiter;
1214 	int ret;
1215 
1216 	mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1217 	arbiter = dev_priv->display.hdcp.master;
1218 
1219 	if (!arbiter || !arbiter->ops) {
1220 		mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1221 		return -EINVAL;
1222 	}
1223 
1224 	ret = arbiter->ops->verify_hprime(arbiter->hdcp_dev, data, rx_hprime);
1225 	if (ret < 0)
1226 		drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. %d\n", ret);
1227 	mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1228 
1229 	return ret;
1230 }
1231 
1232 static int
1233 hdcp2_store_pairing_info(struct intel_connector *connector,
1234 			 struct hdcp2_ake_send_pairing_info *pairing_info)
1235 {
1236 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1237 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1238 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1239 	struct i915_hdcp_master *arbiter;
1240 	int ret;
1241 
1242 	mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1243 	arbiter = dev_priv->display.hdcp.master;
1244 
1245 	if (!arbiter || !arbiter->ops) {
1246 		mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1247 		return -EINVAL;
1248 	}
1249 
1250 	ret = arbiter->ops->store_pairing_info(arbiter->hdcp_dev, data, pairing_info);
1251 	if (ret < 0)
1252 		drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n",
1253 			    ret);
1254 	mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1255 
1256 	return ret;
1257 }
1258 
1259 static int
1260 hdcp2_prepare_lc_init(struct intel_connector *connector,
1261 		      struct hdcp2_lc_init *lc_init)
1262 {
1263 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1264 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1265 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1266 	struct i915_hdcp_master *arbiter;
1267 	int ret;
1268 
1269 	mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1270 	arbiter = dev_priv->display.hdcp.master;
1271 
1272 	if (!arbiter || !arbiter->ops) {
1273 		mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1274 		return -EINVAL;
1275 	}
1276 
1277 	ret = arbiter->ops->initiate_locality_check(arbiter->hdcp_dev, data, lc_init);
1278 	if (ret < 0)
1279 		drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. %d\n",
1280 			    ret);
1281 	mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1282 
1283 	return ret;
1284 }
1285 
1286 static int
1287 hdcp2_verify_lprime(struct intel_connector *connector,
1288 		    struct hdcp2_lc_send_lprime *rx_lprime)
1289 {
1290 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1291 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1292 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1293 	struct i915_hdcp_master *arbiter;
1294 	int ret;
1295 
1296 	mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1297 	arbiter = dev_priv->display.hdcp.master;
1298 
1299 	if (!arbiter || !arbiter->ops) {
1300 		mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1301 		return -EINVAL;
1302 	}
1303 
1304 	ret = arbiter->ops->verify_lprime(arbiter->hdcp_dev, data, rx_lprime);
1305 	if (ret < 0)
1306 		drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n",
1307 			    ret);
1308 	mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1309 
1310 	return ret;
1311 }
1312 
1313 static int hdcp2_prepare_skey(struct intel_connector *connector,
1314 			      struct hdcp2_ske_send_eks *ske_data)
1315 {
1316 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1317 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1318 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1319 	struct i915_hdcp_master *arbiter;
1320 	int ret;
1321 
1322 	mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1323 	arbiter = dev_priv->display.hdcp.master;
1324 
1325 	if (!arbiter || !arbiter->ops) {
1326 		mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1327 		return -EINVAL;
1328 	}
1329 
1330 	ret = arbiter->ops->get_session_key(arbiter->hdcp_dev, data, ske_data);
1331 	if (ret < 0)
1332 		drm_dbg_kms(&dev_priv->drm, "Get session key failed. %d\n",
1333 			    ret);
1334 	mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1335 
1336 	return ret;
1337 }
1338 
1339 static int
1340 hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
1341 				      struct hdcp2_rep_send_receiverid_list
1342 								*rep_topology,
1343 				      struct hdcp2_rep_send_ack *rep_send_ack)
1344 {
1345 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1346 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1347 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1348 	struct i915_hdcp_master *arbiter;
1349 	int ret;
1350 
1351 	mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1352 	arbiter = dev_priv->display.hdcp.master;
1353 
1354 	if (!arbiter || !arbiter->ops) {
1355 		mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1356 		return -EINVAL;
1357 	}
1358 
1359 	ret = arbiter->ops->repeater_check_flow_prepare_ack(arbiter->hdcp_dev,
1360 							    data,
1361 							    rep_topology,
1362 							    rep_send_ack);
1363 	if (ret < 0)
1364 		drm_dbg_kms(&dev_priv->drm,
1365 			    "Verify rep topology failed. %d\n", ret);
1366 	mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1367 
1368 	return ret;
1369 }
1370 
1371 static int
1372 hdcp2_verify_mprime(struct intel_connector *connector,
1373 		    struct hdcp2_rep_stream_ready *stream_ready)
1374 {
1375 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1376 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1377 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1378 	struct i915_hdcp_master *arbiter;
1379 	int ret;
1380 
1381 	mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1382 	arbiter = dev_priv->display.hdcp.master;
1383 
1384 	if (!arbiter || !arbiter->ops) {
1385 		mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1386 		return -EINVAL;
1387 	}
1388 
1389 	ret = arbiter->ops->verify_mprime(arbiter->hdcp_dev, data, stream_ready);
1390 	if (ret < 0)
1391 		drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. %d\n", ret);
1392 	mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1393 
1394 	return ret;
1395 }
1396 
1397 static int hdcp2_authenticate_port(struct intel_connector *connector)
1398 {
1399 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1400 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1401 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1402 	struct i915_hdcp_master *arbiter;
1403 	int ret;
1404 
1405 	mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1406 	arbiter = dev_priv->display.hdcp.master;
1407 
1408 	if (!arbiter || !arbiter->ops) {
1409 		mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1410 		return -EINVAL;
1411 	}
1412 
1413 	ret = arbiter->ops->enable_hdcp_authentication(arbiter->hdcp_dev, data);
1414 	if (ret < 0)
1415 		drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. %d\n",
1416 			    ret);
1417 	mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1418 
1419 	return ret;
1420 }
1421 
1422 static int hdcp2_close_session(struct intel_connector *connector)
1423 {
1424 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1425 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1426 	struct i915_hdcp_master *arbiter;
1427 	int ret;
1428 
1429 	mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1430 	arbiter = dev_priv->display.hdcp.master;
1431 
1432 	if (!arbiter || !arbiter->ops) {
1433 		mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1434 		return -EINVAL;
1435 	}
1436 
1437 	ret = arbiter->ops->close_hdcp_session(arbiter->hdcp_dev,
1438 					     &dig_port->hdcp_port_data);
1439 	mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1440 
1441 	return ret;
1442 }
1443 
/* Deauthentication is just tearing down the firmware session. */
static int hdcp2_deauthenticate_port(struct intel_connector *connector)
{
	return hdcp2_close_session(connector);
}
1448 
/* Authentication flow starts from here */
/*
 * HDCP 2.2 Authentication and Key Exchange (AKE):
 *  - firmware builds AKE_Init, which is sent to the sink
 *  - the sink's certificate is checked for HDCP 2.2 capability and
 *    receiver-ID revocation before being handed to the firmware
 *  - firmware validates the cert and prepares km (stored or no-stored
 *    form, distinguished by the returned size), which is forwarded
 *  - the sink's H' is verified by the firmware
 *  - if the sink was not yet paired, its pairing info is stored so the
 *    next session can use stored km
 */
static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_ake_init ake_init;
		struct hdcp2_ake_send_cert send_cert;
		struct hdcp2_ake_no_stored_km no_stored_km;
		struct hdcp2_ake_send_hprime send_hprime;
		struct hdcp2_ake_send_pairing_info pairing_info;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	size_t size;
	int ret;

	/* Init for seq_num */
	hdcp->seq_num_v = 0;
	hdcp->seq_num_m = 0;

	ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
	if (ret < 0)
		return ret;

	ret = shim->write_2_2_msg(dig_port, &msgs.ake_init,
				  sizeof(msgs.ake_init));
	if (ret < 0)
		return ret;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_CERT,
				 &msgs.send_cert, sizeof(msgs.send_cert));
	if (ret < 0)
		return ret;

	if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
		drm_dbg_kms(&dev_priv->drm, "cert.rx_caps dont claim HDCP2.2\n");
		return -EINVAL;
	}

	hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);

	/* Reject receivers on the SRM revocation list. */
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
					msgs.send_cert.cert_rx.receiver_id,
					1) > 0) {
		drm_err(&dev_priv->drm, "Receiver ID is revoked\n");
		return -EPERM;
	}

	/*
	 * Here msgs.no_stored_km will hold msgs corresponding to the km
	 * stored also.
	 */
	ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
					      &hdcp->is_paired,
					      &msgs.no_stored_km, &size);
	if (ret < 0)
		return ret;

	ret = shim->write_2_2_msg(dig_port, &msgs.no_stored_km, size);
	if (ret < 0)
		return ret;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_HPRIME,
				 &msgs.send_hprime, sizeof(msgs.send_hprime));
	if (ret < 0)
		return ret;

	ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
	if (ret < 0)
		return ret;

	if (!hdcp->is_paired) {
		/* Pairing is required */
		ret = shim->read_2_2_msg(dig_port,
					 HDCP_2_2_AKE_SEND_PAIRING_INFO,
					 &msgs.pairing_info,
					 sizeof(msgs.pairing_info));
		if (ret < 0)
			return ret;

		ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
		if (ret < 0)
			return ret;
		hdcp->is_paired = true;
	}

	return 0;
}
1538 
/*
 * HDCP 2.2 Locality Check: complete the LC_Init -> LC_Send_L_prime
 * round trip with the sink and have the firmware verify L'.  The whole
 * exchange is retried up to HDCP2_LC_RETRY_CNT times; the last error
 * is returned when every attempt fails.
 */
static int hdcp2_locality_check(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_lc_init lc_init;
		struct hdcp2_lc_send_lprime send_lprime;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int tries = HDCP2_LC_RETRY_CNT, ret, i;

	for (i = 0; i < tries; i++) {
		ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
		if (ret < 0)
			continue;

		ret = shim->write_2_2_msg(dig_port, &msgs.lc_init,
				      sizeof(msgs.lc_init));
		if (ret < 0)
			continue;

		ret = shim->read_2_2_msg(dig_port,
					 HDCP_2_2_LC_SEND_LPRIME,
					 &msgs.send_lprime,
					 sizeof(msgs.send_lprime));
		if (ret < 0)
			continue;

		ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
		if (!ret)
			break;
	}

	return ret;
}
1574 
1575 static int hdcp2_session_key_exchange(struct intel_connector *connector)
1576 {
1577 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1578 	struct intel_hdcp *hdcp = &connector->hdcp;
1579 	struct hdcp2_ske_send_eks send_eks;
1580 	int ret;
1581 
1582 	ret = hdcp2_prepare_skey(connector, &send_eks);
1583 	if (ret < 0)
1584 		return ret;
1585 
1586 	ret = hdcp->shim->write_2_2_msg(dig_port, &send_eks,
1587 					sizeof(send_eks));
1588 	if (ret < 0)
1589 		return ret;
1590 
1591 	return 0;
1592 }
1593 
/*
 * Send RepeaterAuth_Stream_Manage for every stream on this port and
 * verify the repeater's M' in the Stream_Ready reply.  Returns -ERANGE
 * once seq_num_m would roll over, signalling that the caller must
 * reauthenticate.
 */
static
int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_stream_manage stream_manage;
		struct hdcp2_rep_stream_ready stream_ready;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int ret, streams_size_delta, i;

	if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX)
		return -ERANGE;

	/* Prepare RepeaterAuth_Stream_Manage msg */
	msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
	drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);

	msgs.stream_manage.k = cpu_to_be16(data->k);

	/* Copy the id/type of each stream to be protected on this port. */
	for (i = 0; i < data->k; i++) {
		msgs.stream_manage.streams[i].stream_id = data->streams[i].stream_id;
		msgs.stream_manage.streams[i].stream_type = data->streams[i].stream_type;
	}

	/* Transmit only the used part of the fixed-size streams array. */
	streams_size_delta = (HDCP_2_2_MAX_CONTENT_STREAMS_CNT - data->k) *
				sizeof(struct hdcp2_streamid_type);
	/* Send it to Repeater */
	ret = shim->write_2_2_msg(dig_port, &msgs.stream_manage,
				  sizeof(msgs.stream_manage) - streams_size_delta);
	if (ret < 0)
		goto out;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_STREAM_READY,
				 &msgs.stream_ready, sizeof(msgs.stream_ready));
	if (ret < 0)
		goto out;

	data->seq_num_m = hdcp->seq_num_m;

	ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);

out:
	/* seq_num_m advances on every attempt, success or failure. */
	hdcp->seq_num_m++;

	return ret;
}
1643 
/*
 * Process the repeater's RepeaterAuth_Send_ReceiverID_List: check
 * topology limits, seq_num_v monotonicity and receiver-ID revocation,
 * have the firmware verify V' and send back the resulting SendAck.
 * Also records whether the downstream topology is Type 1 capable.
 */
static
int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_send_receiverid_list recvid_list;
		struct hdcp2_rep_send_ack rep_ack;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	u32 seq_num_v, device_cnt;
	u8 *rx_info;
	int ret;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
				 &msgs.recvid_list, sizeof(msgs.recvid_list));
	if (ret < 0)
		return ret;

	rx_info = msgs.recvid_list.rx_info;

	if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
	    HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
		drm_dbg_kms(&dev_priv->drm, "Topology Max Size Exceeded\n");
		return -EINVAL;
	}

	/*
	 * MST topology is not Type 1 capable if it contains a downstream
	 * device that is only HDCP 1.x or Legacy HDCP 2.0/2.1 compliant.
	 */
	dig_port->hdcp_mst_type1_capable =
		!HDCP_2_2_HDCP1_DEVICE_CONNECTED(rx_info[1]) &&
		!HDCP_2_2_HDCP_2_0_REP_CONNECTED(rx_info[1]);

	/* Converting and Storing the seq_num_v to local variable as DWORD */
	seq_num_v =
		drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);

	/* The very first ReceiverID_List of a session must carry zero. */
	if (!hdcp->hdcp2_encrypted && seq_num_v) {
		drm_dbg_kms(&dev_priv->drm,
			    "Non zero Seq_num_v at first RecvId_List msg\n");
		return -EINVAL;
	}

	if (seq_num_v < hdcp->seq_num_v) {
		/* Roll over of the seq_num_v from repeater. Reauthenticate. */
		drm_dbg_kms(&dev_priv->drm, "Seq_num_v roll over.\n");
		return -EINVAL;
	}

	/* Reject the list if any downstream receiver ID is revoked. */
	device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
		      HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
					msgs.recvid_list.receiver_ids,
					device_cnt) > 0) {
		drm_err(&dev_priv->drm, "Revoked receiver ID(s) is in list\n");
		return -EPERM;
	}

	ret = hdcp2_verify_rep_topology_prepare_ack(connector,
						    &msgs.recvid_list,
						    &msgs.rep_ack);
	if (ret < 0)
		return ret;

	hdcp->seq_num_v = seq_num_v;
	ret = shim->write_2_2_msg(dig_port, &msgs.rep_ack,
				  sizeof(msgs.rep_ack));
	if (ret < 0)
		return ret;

	return 0;
}
1719 
/*
 * Full HDCP 2.2 sink authentication: AKE, Locality Check and SKE, then
 * optional stream-type configuration and, for repeaters, downstream
 * topology authentication.
 */
static int hdcp2_authenticate_sink(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int ret;

	ret = hdcp2_authentication_key_exchange(connector);
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "AKE Failed. Err : %d\n", ret);
		return ret;
	}

	ret = hdcp2_locality_check(connector);
	if (ret < 0) {
		drm_dbg_kms(&i915->drm,
			    "Locality Check failed. Err : %d\n", ret);
		return ret;
	}

	ret = hdcp2_session_key_exchange(connector);
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "SKE Failed. Err : %d\n", ret);
		return ret;
	}

	if (shim->config_stream_type) {
		ret = shim->config_stream_type(dig_port,
					       hdcp->is_repeater,
					       hdcp->content_type);
		if (ret < 0)
			return ret;
	}

	if (hdcp->is_repeater) {
		ret = hdcp2_authenticate_repeater_topology(connector);
		if (ret < 0) {
			drm_dbg_kms(&i915->drm,
				    "Repeater Auth Failed. Err: %d\n", ret);
			return ret;
		}
	}

	/*
	 * NOTE(review): ret is 0 here unless config_stream_type returned a
	 * positive value — confirm whether "return 0" is the intent.
	 */
	return ret;
}
1766 
/*
 * Enable per-stream HDCP 2.2 encryption once the port link reports
 * encryption active.  If the link is not encrypted, the firmware
 * session is torn down (link_recover) so a fresh authentication can
 * recover it; -EPERM is returned in that case.
 */
static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	enum port port = dig_port->base.port;
	int ret = 0;

	if (!(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
			    LINK_ENCRYPTION_STATUS)) {
		drm_err(&dev_priv->drm, "[%s:%d] HDCP 2.2 Link is not encrypted\n",
			connector->base.name, connector->base.base.id);
		ret = -EPERM;
		goto link_recover;
	}

	if (hdcp->shim->stream_2_2_encryption) {
		ret = hdcp->shim->stream_2_2_encryption(connector, true);
		if (ret) {
			drm_err(&dev_priv->drm, "[%s:%d] Failed to enable HDCP 2.2 stream enc\n",
				connector->base.name, connector->base.base.id);
			return ret;
		}
		drm_dbg_kms(&dev_priv->drm, "HDCP 2.2 transcoder: %s stream encrypted\n",
			    transcoder_name(hdcp->stream_transcoder));
	}

	return 0;

link_recover:
	if (hdcp2_deauthenticate_port(connector) < 0)
		drm_dbg_kms(&dev_priv->drm, "Port deauth failed.\n");

	/* Force a full re-auth next time around. */
	dig_port->hdcp_auth_status = false;
	data->k = 0;

	return ret;
}
1807 
/*
 * Enable port-level HDCP 2.2 encryption: turn on signalling (when the
 * shim provides it), request link encryption once the link reports
 * authenticated, and wait for the encryption status bit to set.
 */
static int hdcp2_enable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	/* Encryption must not already be active at this point. */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
		    LINK_ENCRYPTION_STATUS);
	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
						    true);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to enable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	if (intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
	    LINK_AUTH_STATUS)
		/* Link is Authenticated. Now set for Encryption */
		intel_de_rmw(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
			     0, CTL_LINK_ENCRYPTION_REQ);

	ret = intel_de_wait_for_set(dev_priv,
				    HDCP2_STATUS(dev_priv, cpu_transcoder,
						 port),
				    LINK_ENCRYPTION_STATUS,
				    HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	dig_port->hdcp_auth_status = true;

	return ret;
}
1846 
/*
 * Disable port-level HDCP 2.2 encryption: drop the encryption request,
 * wait for the status bit to clear (timeout is only logged) and then
 * turn off signalling when the shim provides it.
 */
static int hdcp2_disable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	/* Encryption is expected to be active when we get here. */
	drm_WARN_ON(&dev_priv->drm, !(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
				      LINK_ENCRYPTION_STATUS));

	intel_de_rmw(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
		     CTL_LINK_ENCRYPTION_REQ, 0);

	ret = intel_de_wait_for_clear(dev_priv,
				      HDCP2_STATUS(dev_priv, cpu_transcoder,
						   port),
				      LINK_ENCRYPTION_STATUS,
				      HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	if (ret == -ETIMEDOUT)
		drm_dbg_kms(&dev_priv->drm, "Disable Encryption Timedout");

	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
						    false);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to disable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	return ret;
}
1883 
/*
 * Retry wrapper around _hdcp2_propagate_stream_management_info():
 * non-repeaters need no stream management; otherwise retry up to 3
 * times, bailing out early once seq_num_m is about to roll over (the
 * caller then has to reauthenticate).
 */
static int
hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	int i, tries = 3, ret;

	if (!connector->hdcp.is_repeater)
		return 0;

	for (i = 0; i < tries; i++) {
		ret = _hdcp2_propagate_stream_management_info(connector);
		if (!ret)
			break;

		/* Lets restart the auth incase of seq_num_m roll over */
		if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
			drm_dbg_kms(&i915->drm,
				    "seq_num_m roll over.(%d)\n", ret);
			break;
		}

		drm_dbg_kms(&i915->drm,
			    "HDCP2 stream management %d of %d Failed.(%d)\n",
			    i + 1, tries, ret);
	}

	return ret;
}
1912 
/*
 * Top-level HDCP 2.2 bring-up: authenticate the sink (up to 3 tries,
 * tearing the firmware session down between failed attempts), enable
 * port encryption after the spec-mandated delay between SKE and
 * encryption, and finally enable per-stream encryption.  If the port
 * was already authenticated (hdcp_auth_status), the auth loop is
 * skipped and only stream encryption is enabled.
 */
static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	int ret = 0, i, tries = 3;

	for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) {
		ret = hdcp2_authenticate_sink(connector);
		if (!ret) {
			ret = intel_hdcp_prepare_streams(connector);
			if (ret) {
				drm_dbg_kms(&i915->drm,
					    "Prepare streams failed.(%d)\n",
					    ret);
				break;
			}

			ret = hdcp2_propagate_stream_management_info(connector);
			if (ret) {
				drm_dbg_kms(&i915->drm,
					    "Stream management failed.(%d)\n",
					    ret);
				break;
			}

			ret = hdcp2_authenticate_port(connector);
			if (!ret)
				break;
			drm_dbg_kms(&i915->drm, "HDCP2 port auth failed.(%d)\n",
				    ret);
		}

		/* Clearing the mei hdcp session */
		drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
			    i + 1, tries, ret);
		if (hdcp2_deauthenticate_port(connector) < 0)
			drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
	}

	if (!ret && !dig_port->hdcp_auth_status) {
		/*
		 * Ensuring the required 200mSec min time interval between
		 * Session Key Exchange and encryption.
		 */
		msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
		ret = hdcp2_enable_encryption(connector);
		if (ret < 0) {
			drm_dbg_kms(&i915->drm,
				    "Encryption Enable Failed.(%d)\n", ret);
			if (hdcp2_deauthenticate_port(connector) < 0)
				drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
		}
	}

	if (!ret)
		ret = hdcp2_enable_stream_encryption(connector);

	return ret;
}
1972 
1973 static int _intel_hdcp2_enable(struct intel_connector *connector)
1974 {
1975 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1976 	struct intel_hdcp *hdcp = &connector->hdcp;
1977 	int ret;
1978 
1979 	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
1980 		    connector->base.name, connector->base.base.id,
1981 		    hdcp->content_type);
1982 
1983 	ret = hdcp2_authenticate_and_encrypt(connector);
1984 	if (ret) {
1985 		drm_dbg_kms(&i915->drm, "HDCP2 Type%d  Enabling Failed. (%d)\n",
1986 			    hdcp->content_type, ret);
1987 		return ret;
1988 	}
1989 
1990 	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is enabled. Type %d\n",
1991 		    connector->base.name, connector->base.base.id,
1992 		    hdcp->content_type);
1993 
1994 	hdcp->hdcp2_encrypted = true;
1995 	return 0;
1996 }
1997 
/*
 * Tear down HDCP 2.2 on @connector: disable stream-level encryption first,
 * and only bring down port-level encryption plus the firmware session when
 * no other stream on the dig_port still needs it (or when a link recovery
 * forces a full teardown).
 */
static int
_intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n",
		    connector->base.name, connector->base.base.id);

	if (hdcp->shim->stream_2_2_encryption) {
		ret = hdcp->shim->stream_2_2_encryption(connector, false);
		if (ret) {
			drm_err(&i915->drm, "[%s:%d] Failed to disable HDCP 2.2 stream enc\n",
				connector->base.name, connector->base.base.id);
			return ret;
		}
		drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
			    transcoder_name(hdcp->stream_transcoder));

		/*
		 * Other MST streams on this port are still encrypted; keep
		 * the port-level session alive unless recovering the link.
		 */
		if (dig_port->num_hdcp_streams > 0 && !hdcp2_link_recovery)
			return 0;
	}

	ret = hdcp2_disable_encryption(connector);

	if (hdcp2_deauthenticate_port(connector) < 0)
		drm_dbg_kms(&i915->drm, "Port deauth failed.\n");

	connector->hdcp.hdcp2_encrypted = false;
	dig_port->hdcp_auth_status = false;
	/* NOTE(review): data->k appears to be the stream count passed to fw;
	 * reset here since the session is gone - confirm against fw API. */
	data->k = 0;

	return ret;
}
2035 
/* Implements the Link Integrity Check for HDCP2.2 */
static int intel_hdcp2_check_link(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);
	cpu_transcoder = hdcp->cpu_transcoder;

	/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp2_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Hardware reports encryption off even though we never disabled it:
	 * tear HDCP down and mark the property DESIRED so it can be
	 * re-enabled.
	 */
	if (drm_WARN_ON(&dev_priv->drm,
			!intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) {
		drm_err(&dev_priv->drm,
			"HDCP2.2 link stopped the encryption, %x\n",
			intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)));
		ret = -ENXIO;
		_intel_hdcp2_disable(connector, true);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	/* Ask the shim (sink side) whether the link is still protected. */
	ret = hdcp->shim->check_2_2_link(dig_port, connector);
	if (ret == HDCP_LINK_PROTECTED) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_ENABLED,
					true);
		}
		goto out;
	}

	if (ret == HDCP_TOPOLOGY_CHANGE) {
		if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
			goto out;

		drm_dbg_kms(&dev_priv->drm,
			    "HDCP2.2 Downstream topology change\n");
		/* Try re-authenticating just the repeater topology first. */
		ret = hdcp2_authenticate_repeater_topology(connector);
		if (!ret) {
			intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_ENABLED,
					true);
			goto out;
		}
		drm_dbg_kms(&dev_priv->drm,
			    "[%s:%d] Repeater topology auth failed.(%d)\n",
			    connector->base.name, connector->base.base.id,
			    ret);
	} else {
		drm_dbg_kms(&dev_priv->drm,
			    "[%s:%d] HDCP2.2 link failed, retrying auth\n",
			    connector->base.name, connector->base.base.id);
	}

	/* Full re-authentication: disable then enable HDCP2.2 again. */
	ret = _intel_hdcp2_disable(connector, true);
	if (ret) {
		drm_err(&dev_priv->drm,
			"[%s:%d] Failed to disable hdcp2.2 (%d)\n",
			connector->base.name, connector->base.base.id, ret);
		intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
		goto out;
	}

	ret = _intel_hdcp2_enable(connector);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "[%s:%d] Failed to enable hdcp2.2 (%d)\n",
			    connector->base.name, connector->base.base.id,
			    ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
2130 
2131 static void intel_hdcp_check_work(struct work_struct *work)
2132 {
2133 	struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
2134 					       struct intel_hdcp,
2135 					       check_work);
2136 	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
2137 
2138 	if (drm_connector_is_unregistered(&connector->base))
2139 		return;
2140 
2141 	if (!intel_hdcp2_check_link(connector))
2142 		schedule_delayed_work(&hdcp->check_work,
2143 				      DRM_HDCP2_CHECK_PERIOD_MS);
2144 	else if (!intel_hdcp_check_link(connector))
2145 		schedule_delayed_work(&hdcp->check_work,
2146 				      DRM_HDCP_CHECK_PERIOD_MS);
2147 }
2148 
2149 static int i915_hdcp_component_bind(struct device *i915_kdev,
2150 				    struct device *mei_kdev, void *data)
2151 {
2152 	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
2153 
2154 	drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n");
2155 	mutex_lock(&dev_priv->display.hdcp.comp_mutex);
2156 	dev_priv->display.hdcp.master = (struct i915_hdcp_master *)data;
2157 	dev_priv->display.hdcp.master->hdcp_dev = mei_kdev;
2158 	mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
2159 
2160 	return 0;
2161 }
2162 
2163 static void i915_hdcp_component_unbind(struct device *i915_kdev,
2164 				       struct device *mei_kdev, void *data)
2165 {
2166 	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
2167 
2168 	drm_dbg(&dev_priv->drm, "I915 HDCP comp unbind\n");
2169 	mutex_lock(&dev_priv->display.hdcp.comp_mutex);
2170 	dev_priv->display.hdcp.master = NULL;
2171 	mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
2172 }
2173 
/*
 * Component ops registered under I915_COMPONENT_HDCP; the firmware-side
 * HDCP driver binds against these and hands us its device (mei_kdev).
 */
static const struct component_ops i915_hdcp_ops = {
	.bind   = i915_hdcp_component_bind,
	.unbind = i915_hdcp_component_unbind,
};
2178 
2179 static enum hdcp_ddi intel_get_hdcp_ddi_index(enum port port)
2180 {
2181 	switch (port) {
2182 	case PORT_A:
2183 		return HDCP_DDI_A;
2184 	case PORT_B ... PORT_F:
2185 		return (enum hdcp_ddi)port;
2186 	default:
2187 		return HDCP_DDI_INVALID_PORT;
2188 	}
2189 }
2190 
2191 static enum hdcp_transcoder intel_get_hdcp_transcoder(enum transcoder cpu_transcoder)
2192 {
2193 	switch (cpu_transcoder) {
2194 	case TRANSCODER_A ... TRANSCODER_D:
2195 		return (enum hdcp_transcoder)(cpu_transcoder | 0x10);
2196 	default: /* eDP, DSI TRANSCODERS are non HDCP capable */
2197 		return HDCP_INVALID_TRANSCODER;
2198 	}
2199 }
2200 
/*
 * Populate dig_port->hdcp_port_data with the DDI/transcoder/protocol info
 * the HDCP firmware expects, allocating the per-pipe stream table on first
 * use. Returns 0 on success or -ENOMEM.
 */
static int initialize_hdcp_port_data(struct intel_connector *connector,
				     struct intel_digital_port *dig_port,
				     const struct intel_hdcp_shim *shim)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;

	if (DISPLAY_VER(dev_priv) < 12)
		data->hdcp_ddi = intel_get_hdcp_ddi_index(port);
	else
		/*
		 * As per ME FW API expectation, for GEN 12+, hdcp_ddi is filled
		 * with zero(INVALID PORT index).
		 */
		data->hdcp_ddi = HDCP_DDI_INVALID_PORT;

	/*
	 * As associated transcoder is set and modified at modeset, here hdcp_transcoder
	 * is initialized to zero (invalid transcoder index). This will be
	 * retained for <Gen12 forever.
	 */
	data->hdcp_transcoder = HDCP_INVALID_TRANSCODER;

	data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
	data->protocol = (u8)shim->protocol;

	/* Allocated once per dig_port and reused on later (re-)inits. */
	if (!data->streams)
		data->streams = kcalloc(INTEL_NUM_PIPES(dev_priv),
					sizeof(struct hdcp2_streamid_type),
					GFP_KERNEL);
	if (!data->streams) {
		drm_err(&dev_priv->drm, "Out of Memory\n");
		return -ENOMEM;
	}
	/* For SST */
	data->streams[0].stream_id = 0;
	data->streams[0].stream_type = hdcp->content_type;

	return 0;
}
2243 
2244 static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
2245 {
2246 	if (intel_hdcp_gsc_cs_required(dev_priv))
2247 		return true;
2248 
2249 	if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
2250 		return false;
2251 
2252 	return (DISPLAY_VER(dev_priv) >= 10 ||
2253 		IS_KABYLAKE(dev_priv) ||
2254 		IS_COFFEELAKE(dev_priv) ||
2255 		IS_COMETLAKE(dev_priv));
2256 }
2257 
/*
 * Hook i915 up to the HDCP2.2 firmware backend: GSC CS init where required,
 * otherwise register a typed component for the firmware-side HDCP driver to
 * bind (see i915_hdcp_ops). Failure is non-fatal - HDCP2.2 just remains
 * unavailable - so only a debug message is printed.
 */
void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
{
	int ret;

	if (!is_hdcp2_supported(dev_priv))
		return;

	mutex_lock(&dev_priv->display.hdcp.comp_mutex);
	drm_WARN_ON(&dev_priv->drm, dev_priv->display.hdcp.comp_added);

	dev_priv->display.hdcp.comp_added = true;
	mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
	if (intel_hdcp_gsc_cs_required(dev_priv))
		ret = intel_hdcp_gsc_init(dev_priv);
	else
		ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_ops,
					  I915_COMPONENT_HDCP);

	if (ret < 0) {
		drm_dbg_kms(&dev_priv->drm, "Failed at fw component add(%d)\n",
			    ret);
		/* Roll back comp_added so component_fini becomes a no-op. */
		mutex_lock(&dev_priv->display.hdcp.comp_mutex);
		dev_priv->display.hdcp.comp_added = false;
		mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
		return;
	}
}
2285 
2286 static void intel_hdcp2_init(struct intel_connector *connector,
2287 			     struct intel_digital_port *dig_port,
2288 			     const struct intel_hdcp_shim *shim)
2289 {
2290 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
2291 	struct intel_hdcp *hdcp = &connector->hdcp;
2292 	int ret;
2293 
2294 	ret = initialize_hdcp_port_data(connector, dig_port, shim);
2295 	if (ret) {
2296 		drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
2297 		return;
2298 	}
2299 
2300 	hdcp->hdcp2_supported = true;
2301 }
2302 
2303 int intel_hdcp_init(struct intel_connector *connector,
2304 		    struct intel_digital_port *dig_port,
2305 		    const struct intel_hdcp_shim *shim)
2306 {
2307 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
2308 	struct intel_hdcp *hdcp = &connector->hdcp;
2309 	int ret;
2310 
2311 	if (!shim)
2312 		return -EINVAL;
2313 
2314 	if (is_hdcp2_supported(dev_priv))
2315 		intel_hdcp2_init(connector, dig_port, shim);
2316 
2317 	ret =
2318 	drm_connector_attach_content_protection_property(&connector->base,
2319 							 hdcp->hdcp2_supported);
2320 	if (ret) {
2321 		hdcp->hdcp2_supported = false;
2322 		kfree(dig_port->hdcp_port_data.streams);
2323 		return ret;
2324 	}
2325 
2326 	hdcp->shim = shim;
2327 	mutex_init(&hdcp->mutex);
2328 	INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
2329 	INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
2330 	init_waitqueue_head(&hdcp->cp_irq_queue);
2331 
2332 	return 0;
2333 }
2334 
/*
 * intel_hdcp_enable - Enable content protection on @connector
 * @connector: connector to protect
 * @pipe_config: crtc state driving the connector
 * @content_type: requested DRM_MODE_HDCP_CONTENT_TYPE* value
 *
 * Prefers HDCP2.2 when the setup is capable of it, falling back to HDCP1.4
 * for non-Type1 content. On success the periodic link-check work is
 * scheduled and the CP property is moved to ENABLED.
 */
int intel_hdcp_enable(struct intel_connector *connector,
		      const struct intel_crtc_state *pipe_config, u8 content_type)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
	int ret = -EINVAL;

	if (!hdcp->shim)
		return -ENOENT;

	if (!connector->encoder) {
		drm_err(&dev_priv->drm, "[%s:%d] encoder is not initialized\n",
			connector->base.name, connector->base.base.id);
		return -ENODEV;
	}

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);
	drm_WARN_ON(&dev_priv->drm,
		    hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
	hdcp->content_type = content_type;

	/* MST uses the master transcoder; the stream keeps its own. */
	if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) {
		hdcp->cpu_transcoder = pipe_config->mst_master_transcoder;
		hdcp->stream_transcoder = pipe_config->cpu_transcoder;
	} else {
		hdcp->cpu_transcoder = pipe_config->cpu_transcoder;
		hdcp->stream_transcoder = INVALID_TRANSCODER;
	}

	if (DISPLAY_VER(dev_priv) >= 12)
		dig_port->hdcp_port_data.hdcp_transcoder =
			intel_get_hdcp_transcoder(hdcp->cpu_transcoder);

	/*
	 * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
	 * is capable of HDCP2.2, it is preferred to use HDCP2.2.
	 */
	if (intel_hdcp2_capable(connector)) {
		ret = _intel_hdcp2_enable(connector);
		if (!ret)
			check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
	}

	/*
	 * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
	 * be attempted.
	 */
	if (ret && intel_hdcp_capable(connector) &&
	    hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
		ret = _intel_hdcp_enable(connector);
	}

	if (!ret) {
		schedule_delayed_work(&hdcp->check_work, check_link_interval);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_ENABLED,
					true);
	}

	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
2401 
/*
 * intel_hdcp_disable - Disable content protection on @connector
 *
 * Moves the CP property to UNDESIRED and tears down whichever of HDCP2.2
 * or HDCP1.4 is currently encrypting the link.
 */
int intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret = 0;

	if (!hdcp->shim)
		return -ENOENT;

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);

	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		goto out;

	intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_UNDESIRED, false);
	if (hdcp->hdcp2_encrypted)
		ret = _intel_hdcp2_disable(connector, false);
	else if (hdcp->hdcp_encrypted)
		ret = _intel_hdcp_disable(connector);

out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	/*
	 * The link-check worker takes hdcp->mutex itself (see
	 * intel_hdcp2_check_link), so the synchronous cancel must happen
	 * only after the locks are dropped.
	 */
	cancel_delayed_work_sync(&hdcp->check_work);
	return ret;
}
2430 
/*
 * Called from the modeset commit path: reconcile the connector's content
 * protection uapi state with the actual HDCP state, disabling and/or
 * re-enabling HDCP as needed (UNDESIRED requests and content-type changes
 * force a disable first).
 */
void intel_hdcp_update_pipe(struct intel_atomic_state *state,
			    struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state)
{
	struct intel_connector *connector =
				to_intel_connector(conn_state->connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	bool content_protection_type_changed, desired_and_not_enabled = false;

	if (!connector->hdcp.shim)
		return;

	/* A type change only matters while protection is not UNDESIRED. */
	content_protection_type_changed =
		(conn_state->hdcp_content_type != hdcp->content_type &&
		 conn_state->content_protection !=
		 DRM_MODE_CONTENT_PROTECTION_UNDESIRED);

	/*
	 * During the HDCP encryption session if Type change is requested,
	 * disable the HDCP and reenable it with new TYPE value.
	 */
	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
	    content_protection_type_changed)
		intel_hdcp_disable(connector);

	/*
	 * Mark the hdcp state as DESIRED after the hdcp disable of type
	 * change procedure.
	 */
	if (content_protection_type_changed) {
		mutex_lock(&hdcp->mutex);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		drm_connector_get(&connector->base);
		schedule_work(&hdcp->prop_work);
		mutex_unlock(&hdcp->mutex);
	}

	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		mutex_lock(&hdcp->mutex);
		/* Avoid enabling hdcp, if it already ENABLED */
		desired_and_not_enabled =
			hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
		mutex_unlock(&hdcp->mutex);
		/*
		 * If HDCP already ENABLED and CP property is DESIRED, schedule
		 * prop_work to update correct CP property to user space.
		 */
		if (!desired_and_not_enabled && !content_protection_type_changed) {
			drm_connector_get(&connector->base);
			schedule_work(&hdcp->prop_work);
		}
	}

	if (desired_and_not_enabled || content_protection_type_changed)
		intel_hdcp_enable(connector,
				  crtc_state,
				  (u8)conn_state->hdcp_content_type);
}
2492 
2493 void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
2494 {
2495 	mutex_lock(&dev_priv->display.hdcp.comp_mutex);
2496 	if (!dev_priv->display.hdcp.comp_added) {
2497 		mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
2498 		return;
2499 	}
2500 
2501 	dev_priv->display.hdcp.comp_added = false;
2502 	mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
2503 
2504 	if (intel_hdcp_gsc_cs_required(dev_priv))
2505 		intel_hdcp_gsc_fini(dev_priv);
2506 	else
2507 		component_del(dev_priv->drm.dev, &i915_hdcp_ops);
2508 }
2509 
/*
 * Final HDCP teardown for a connector, expected to run from the connector
 * destroy path: flush the link-check worker and drop the shim so no further
 * HDCP work can be kicked off on this connector.
 */
void intel_hdcp_cleanup(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;

	if (!hdcp->shim)
		return;

	/*
	 * If the connector is registered, it's possible userspace could kick
	 * off another HDCP enable, which would re-spawn the workers.
	 */
	drm_WARN_ON(connector->base.dev,
		connector->base.registration_state == DRM_CONNECTOR_REGISTERED);

	/*
	 * Now that the connector is not registered, check_work won't be run,
	 * but cancel any outstanding instances of it
	 */
	cancel_delayed_work_sync(&hdcp->check_work);

	/*
	 * We don't cancel prop_work in the same way as check_work since it
	 * requires connection_mutex which could be held while calling this
	 * function. Instead, we rely on the connector references grabbed before
	 * scheduling prop_work to ensure the connector is alive when prop_work
	 * is run. So if we're in the destroy path (which is where this
	 * function should be called), we're "guaranteed" that prop_work is not
	 * active (tl;dr This Should Never Happen).
	 */
	drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work));

	mutex_lock(&hdcp->mutex);
	hdcp->shim = NULL;
	mutex_unlock(&hdcp->mutex);
}
2545 
/*
 * Atomic-check hook: normalize the content protection uapi state for the
 * new connector state and force a modeset (mode_changed) when the HDCP
 * state actually needs to change during commit.
 */
void intel_hdcp_atomic_check(struct drm_connector *connector,
			     struct drm_connector_state *old_state,
			     struct drm_connector_state *new_state)
{
	u64 old_cp = old_state->content_protection;
	u64 new_cp = new_state->content_protection;
	struct drm_crtc_state *crtc_state;

	if (!new_state->crtc) {
		/*
		 * If the connector is being disabled with CP enabled, mark it
		 * desired so it's re-enabled when the connector is brought back
		 */
		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			new_state->content_protection =
				DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return;
	}

	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
						   new_state->crtc);
	/*
	 * Fix the HDCP uapi content protection state in case of modeset.
	 * FIXME: As per HDCP content protection property uapi doc, an uevent()
	 * need to be sent if there is transition from ENABLED->DESIRED.
	 */
	if (drm_atomic_crtc_needs_modeset(crtc_state) &&
	    (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
		new_state->content_protection =
			DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/*
	 * Nothing to do if the state didn't change, or HDCP was activated since
	 * the last commit. And also no change in hdcp content type.
	 */
	if (old_cp == new_cp ||
	    (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	     new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
		if (old_state->hdcp_content_type ==
				new_state->hdcp_content_type)
			return;
	}

	crtc_state->mode_changed = true;
}
2592 
2593 /* Handles the CP_IRQ raised from the DP HDCP sink */
2594 void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
2595 {
2596 	struct intel_hdcp *hdcp = &connector->hdcp;
2597 
2598 	if (!hdcp->shim)
2599 		return;
2600 
2601 	atomic_inc(&connector->hdcp.cp_irq_count);
2602 	wake_up_all(&connector->hdcp.cp_irq_queue);
2603 
2604 	schedule_delayed_work(&hdcp->check_work, 0);
2605 }
2606