/* SPDX-License-Identifier: MIT */
/*
 * Copyright (C) 2017 Google, Inc.
 *
 * Authors:
 * Sean Paul <seanpaul@chromium.org>
 */

#include <drm/drm_hdcp.h>
#include <drm/i915_component.h>
#include <linux/i2c.h>
#include <linux/random.h>
#include <linux/component.h>

#include "intel_drv.h"
#include "i915_reg.h"

#define KEY_LOAD_TRIES  5
#define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS        50
#define HDCP2_LC_RETRY_CNT                      3

static
bool intel_hdcp_is_ksv_valid(u8 *ksv)
{
        int i, ones = 0;
        /* KSV has 20 1's and 20 0's */
        for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
                ones += hweight8(ksv[i]);
        if (ones != 20)
                return false;

        return true;
}

static
int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
                               const struct intel_hdcp_shim *shim, u8 *bksv)
{
        int ret, i, tries = 2;

        /* HDCP spec states that we must retry the bksv if it is invalid */
        for (i = 0; i < tries; i++) {
                ret = shim->read_bksv(intel_dig_port, bksv);
                if (ret)
                        return ret;
                if (intel_hdcp_is_ksv_valid(bksv))
                        break;
        }
        if (i == tries) {
                DRM_DEBUG_KMS("Bksv is invalid\n");
                return -ENODEV;
        }

        return 0;
}

/* Are the platform and the sink HDCP1.4 capable? */
bool intel_hdcp_capable(struct intel_connector *connector)
{
        struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
        const struct intel_hdcp_shim *shim = connector->hdcp.shim;
        bool capable = false;
        u8 bksv[5];

        if (!shim)
                return capable;

        if (shim->hdcp_capable) {
                shim->hdcp_capable(intel_dig_port, &capable);
        } else {
                if (!intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv))
                        capable = true;
        }

        return capable;
}

/* Are the platform and the sink HDCP2.2 capable? */
static bool intel_hdcp2_capable(struct intel_connector *connector)
{
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
        struct intel_hdcp *hdcp = &connector->hdcp;
        bool capable = false;

        /* I915 support for HDCP2.2 */
        if (!hdcp->hdcp2_supported)
                return false;

        /* The MEI component interface must be bound and available */
        mutex_lock(&dev_priv->hdcp_comp_mutex);
        if (!dev_priv->hdcp_comp_added || !dev_priv->hdcp_master) {
                mutex_unlock(&dev_priv->hdcp_comp_mutex);
                return false;
        }
        mutex_unlock(&dev_priv->hdcp_comp_mutex);

        /* Sink's capability for HDCP2.2 */
        hdcp->shim->hdcp_2_2_capable(intel_dig_port, &capable);

        return capable;
}

static inline bool intel_hdcp_in_use(struct intel_connector *connector)
{
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        enum port port = connector->encoder->port;
        u32 reg;

        reg = I915_READ(PORT_HDCP_STATUS(port));
        return reg & HDCP_STATUS_ENC;
}

static inline bool intel_hdcp2_in_use(struct intel_connector *connector)
{
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        enum port port = connector->encoder->port;
        u32 reg;

        reg = I915_READ(HDCP2_STATUS_DDI(port));
        return reg & LINK_ENCRYPTION_STATUS;
}

static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
                                    const struct intel_hdcp_shim *shim)
{
        int ret, read_ret;
        bool ksv_ready;

        /* Poll for ksv list ready (spec says max time allowed is 5s) */
        ret = __wait_for(read_ret = shim->read_ksv_ready(intel_dig_port,
                                                         &ksv_ready),
                         read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
                         100 * 1000);
        if (ret)
                return ret;
        if (read_ret)
                return read_ret;
        if (!ksv_ready)
                return -ETIMEDOUT;

        return 0;
}

static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
{
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        struct i915_power_well *power_well;
        enum i915_power_well_id id;
        bool enabled = false;

        /*
         * On HSW and BDW, Display HW loads the key as soon as Display resumes.
         * On all BXT+, SW can load the keys only when PW#1 is turned on.
         */
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                id = HSW_DISP_PW_GLOBAL;
        else
                id = SKL_DISP_PW_1;

        mutex_lock(&power_domains->lock);

        /* PG1 (power well #1) needs to be enabled */
        for_each_power_well(dev_priv, power_well) {
                if (power_well->desc->id == id) {
                        enabled = power_well->desc->ops->is_enabled(dev_priv,
                                                                    power_well);
                        break;
                }
        }
        mutex_unlock(&power_domains->lock);

        /*
         * Another requirement for HDCP key loadability is that the PLL for
         * cdclk is enabled. We won't get here without an active CRTC, so
         * assume cdclk is already running.
         */

        return enabled;
}

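/*
 * Clear the loaded HDCP key and the key/fuse status so that a fresh key
 * load can be attempted.
 */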
static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
{
        I915_WRITE(HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
        I915_WRITE(HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS |
                   HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
}

static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
{
        int ret;
        u32 val;

        val = I915_READ(HDCP_KEY_STATUS);
        if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
                return 0;

        /*
         * On HSW and BDW, HW loads the HDCP1.4 key when the display comes
         * out of reset. So if the key is not already loaded, it is an error.
         */
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                if (!(I915_READ(HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
                        return -ENXIO;

        /*
         * Initiate loading the HDCP key from fuses.
         *
         * On BXT+ platforms the HDCP key has to be loaded by SW. Gen 9
         * platforms other than BXT and GLK (i.e. GEN9_BC) differ from the
         * rest in the key load trigger: they use the GT Driver Mailbox
         * interface instead of HDCP_KEY_CONF.
         */
        if (IS_GEN9_BC(dev_priv)) {
                mutex_lock(&dev_priv->pcu_lock);
                ret = sandybridge_pcode_write(dev_priv,
                                              SKL_PCODE_LOAD_HDCP_KEYS, 1);
                mutex_unlock(&dev_priv->pcu_lock);
                if (ret) {
                        DRM_ERROR("Failed to initiate HDCP key load (%d)\n",
                                  ret);
                        return ret;
                }
        } else {
                I915_WRITE(HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
        }

        /* Wait for the keys to load (500us) */
        ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS,
                                        HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
                                        10, 1, &val);
        if (ret)
                return ret;
        else if (!(val & HDCP_KEY_LOAD_STATUS))
                return -ENXIO;

        /* Send Aksv over to PCH display for use in authentication */
        I915_WRITE(HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);

        return 0;
}

/* Write the next 32 bits of SHA-1 text and wait for the HW to consume it */
static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
{
        I915_WRITE(HDCP_SHA_TEXT, sha_text);
        if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL,
                                    HDCP_SHA1_READY, HDCP_SHA1_READY, 1)) {
                DRM_ERROR("Timed out waiting for SHA1 ready\n");
                return -ETIMEDOUT;
        }
        return 0;
}

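/*
 * Returns the HDCP_REP_CTL bits that select this port's repeater-present
 * flag and SHA-1 M0 source.
 */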
static
u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port)
{
        enum port port = intel_dig_port->base.port;
        switch (port) {
        case PORT_A:
                return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
        case PORT_B:
                return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
        case PORT_C:
                return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
        case PORT_D:
                return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
        case PORT_E:
                return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
        default:
                break;
        }
        DRM_ERROR("Unknown port %d\n", port);
        return -EINVAL;
}

static
int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
                                const struct intel_hdcp_shim *shim,
                                u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
        struct drm_i915_private *dev_priv;
        u32 vprime, sha_text, sha_leftovers, rep_ctl;
        int ret, i, j, sha_idx;

        dev_priv = intel_dig_port->base.base.dev->dev_private;

        /* Process V' values from the receiver */
        for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
                ret = shim->read_v_prime_part(intel_dig_port, i, &vprime);
                if (ret)
                        return ret;
                I915_WRITE(HDCP_SHA_V_PRIME(i), vprime);
        }

        /*
         * We need to write the concatenation of all device KSVs, BINFO (DP) ||
         * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
         * stream is written via the HDCP_SHA_TEXT register in 32-bit
         * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
         * index will keep track of our progress through the 64 bytes as well as
         * helping us work the 40-bit KSVs through our 32-bit register.
         *
         * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
         */
        sha_idx = 0;
        sha_text = 0;
        sha_leftovers = 0;
        rep_ctl = intel_hdcp_get_repeater_ctl(intel_dig_port);
        I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
        for (i = 0; i < num_downstream; i++) {
                unsigned int sha_empty;
                u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];

                /* Fill up the empty slots in sha_text and write it out */
                sha_empty = sizeof(sha_text) - sha_leftovers;
                for (j = 0; j < sha_empty; j++)
                        sha_text |= ksv[j] << ((sizeof(sha_text) - j - 1) * 8);

                ret = intel_write_sha_text(dev_priv, sha_text);
                if (ret < 0)
                        return ret;

                /* Programming guide writes this every 64 bytes */
                sha_idx += sizeof(sha_text);
                if (!(sha_idx % 64))
                        I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);

                /* Store the leftover bytes from the ksv in sha_text */
                sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
                sha_text = 0;
                for (j = 0; j < sha_leftovers; j++)
                        sha_text |= ksv[sha_empty + j] <<
                                        ((sizeof(sha_text) - j - 1) * 8);

                /*
                 * If we still have room in sha_text for more data, continue.
                 * Otherwise, write it out immediately.
                 */
                if (sizeof(sha_text) > sha_leftovers)
                        continue;

                ret = intel_write_sha_text(dev_priv, sha_text);
                if (ret < 0)
                        return ret;
                sha_leftovers = 0;
                sha_text = 0;
                sha_idx += sizeof(sha_text);
        }

        /*
         * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
         * bytes are leftover from the last ksv, we might be able to fit them
         * all in sha_text (first 2 cases), or we might need to split them up
         * into 2 writes (last 2 cases).
         */
        if (sha_leftovers == 0) {
                /* Write 16 bits of text, 16 bits of M0 */
                I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
                ret = intel_write_sha_text(dev_priv,
                                           bstatus[0] << 8 | bstatus[1]);
                if (ret < 0)
                        return ret;
                sha_idx += sizeof(sha_text);

                /* Write 32 bits of M0 */
                I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
                ret = intel_write_sha_text(dev_priv, 0);
                if (ret < 0)
                        return ret;
                sha_idx += sizeof(sha_text);

                /* Write 16 bits of M0 */
                I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
                ret = intel_write_sha_text(dev_priv, 0);
                if (ret < 0)
                        return ret;
                sha_idx += sizeof(sha_text);

        } else if (sha_leftovers == 1) {
                /* Write 24 bits of text, 8 bits of M0 */
                I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
                sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
                /* Only 24-bits of data, must be in the LSB */
                sha_text = (sha_text & 0xffffff00) >> 8;
                ret = intel_write_sha_text(dev_priv, sha_text);
                if (ret < 0)
                        return ret;
                sha_idx += sizeof(sha_text);

                /* Write 32 bits of M0 */
                I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
                ret = intel_write_sha_text(dev_priv, 0);
                if (ret < 0)
                        return ret;
                sha_idx += sizeof(sha_text);

                /* Write 24 bits of M0 */
                I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
                ret = intel_write_sha_text(dev_priv, 0);
                if (ret < 0)
                        return ret;
                sha_idx += sizeof(sha_text);

        } else if (sha_leftovers == 2) {
                /* Write 32 bits of text */
                I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
                sha_text |= bstatus[0] << 24 | bstatus[1] << 16;
                ret = intel_write_sha_text(dev_priv, sha_text);
                if (ret < 0)
                        return ret;
                sha_idx += sizeof(sha_text);

                /* Write 64 bits of M0 */
                I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
                for (i = 0; i < 2; i++) {
                        ret = intel_write_sha_text(dev_priv, 0);
                        if (ret < 0)
                                return ret;
                        sha_idx += sizeof(sha_text);
                }
        } else if (sha_leftovers == 3) {
                /* Write 32 bits of text */
                I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
                sha_text |= bstatus[0] << 24;
                ret = intel_write_sha_text(dev_priv, sha_text);
                if (ret < 0)
                        return ret;
                sha_idx += sizeof(sha_text);

                /* Write 8 bits of text, 24 bits of M0 */
                I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
                ret = intel_write_sha_text(dev_priv, bstatus[1]);
                if (ret < 0)
                        return ret;
                sha_idx += sizeof(sha_text);

                /* Write 32 bits of M0 */
                I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
                ret = intel_write_sha_text(dev_priv, 0);
                if (ret < 0)
                        return ret;
                sha_idx += sizeof(sha_text);

                /* Write 8 bits of M0 */
                I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
                ret = intel_write_sha_text(dev_priv, 0);
                if (ret < 0)
                        return ret;
                sha_idx += sizeof(sha_text);
        } else {
                DRM_DEBUG_KMS("Invalid number of leftovers %d\n",
                              sha_leftovers);
                return -EINVAL;
        }

        I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
        /* Fill up to 64-4 bytes with zeros (leave the last write for length) */
        while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
                ret = intel_write_sha_text(dev_priv, 0);
                if (ret < 0)
                        return ret;
                sha_idx += sizeof(sha_text);
        }

        /*
         * Last write gets the length of the concatenation in bits. That is:
         *  - 5 bytes per device
         *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
         */
        sha_text = (num_downstream * 5 + 10) * 8;
        ret = intel_write_sha_text(dev_priv, sha_text);
        if (ret < 0)
                return ret;

        /* Tell the HW we're done with the hash and wait for it to ACK */
        I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH);
        if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL,
                                    HDCP_SHA1_COMPLETE,
                                    HDCP_SHA1_COMPLETE, 1)) {
                DRM_ERROR("Timed out waiting for SHA1 complete\n");
                return -ETIMEDOUT;
        }
        if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
                DRM_DEBUG_KMS("SHA-1 mismatch, HDCP failed\n");
                return -ENXIO;
        }

        return 0;
}

/* Implements Part 2 of the HDCP authorization procedure */
static
int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
                               const struct intel_hdcp_shim *shim)
{
        u8 bstatus[2], num_downstream, *ksv_fifo;
        int ret, i, tries = 3;

        ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
        if (ret) {
                DRM_DEBUG_KMS("KSV list failed to become ready (%d)\n", ret);
                return ret;
        }

        ret = shim->read_bstatus(intel_dig_port, bstatus);
        if (ret)
                return ret;

        if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
            DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
                DRM_DEBUG_KMS("Max Topology Limit Exceeded\n");
                return -EPERM;
        }

        /*
         * If the repeater reports a device count of 0, the HDCP1.4 spec
         * allows the encryption to be disabled, which implies the repeater
         * has no display of its own. Since a repeater with 0 downstream
         * devices cannot consume the encrypted content, fail the
         * authentication.
         */
        num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
        if (num_downstream == 0)
                return -EINVAL;

        ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
        if (!ksv_fifo)
                return -ENOMEM;

        ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo);
        if (ret)
                goto err;

        /*
         * If V' does not match, the DP spec mandates re-reading V' at least
         * twice.
         */
        for (i = 0; i < tries; i++) {
                ret = intel_hdcp_validate_v_prime(intel_dig_port, shim,
                                                  ksv_fifo, num_downstream,
                                                  bstatus);
                if (!ret)
                        break;
        }

        if (i == tries) {
                DRM_DEBUG_KMS("V Prime validation failed.(%d)\n", ret);
                goto err;
        }

        DRM_DEBUG_KMS("HDCP is enabled (%d downstream devices)\n",
                      num_downstream);
        ret = 0;
err:
        kfree(ksv_fifo);
        return ret;
}

/* Implements Part 1 of the HDCP authorization procedure */
static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
                           const struct intel_hdcp_shim *shim)
{
        struct drm_i915_private *dev_priv;
        enum port port;
        unsigned long r0_prime_gen_start;
        int ret, i, tries = 2;
        union {
                u32 reg[2];
                u8 shim[DRM_HDCP_AN_LEN];
        } an;
        union {
                u32 reg[2];
                u8 shim[DRM_HDCP_KSV_LEN];
        } bksv;
        union {
                u32 reg;
                u8 shim[DRM_HDCP_RI_LEN];
        } ri;
        bool repeater_present, hdcp_capable;

        dev_priv = intel_dig_port->base.base.dev->dev_private;

        port = intel_dig_port->base.port;

        /*
         * Detects whether the display is HDCP capable. Although we check for
         * valid Bksv below, the HDCP over DP spec requires that we check
         * whether the display supports HDCP before we write An. For HDMI
         * displays, this is not necessary.
         */
        if (shim->hdcp_capable) {
                ret = shim->hdcp_capable(intel_dig_port, &hdcp_capable);
                if (ret)
                        return ret;
                if (!hdcp_capable) {
                        DRM_DEBUG_KMS("Panel is not HDCP capable\n");
                        return -EINVAL;
                }
        }

        /* Initialize An with 2 random values and acquire it */
        for (i = 0; i < 2; i++)
                I915_WRITE(PORT_HDCP_ANINIT(port), get_random_u32());
        I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_CAPTURE_AN);

        /* Wait for An to be acquired */
        if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port),
                                    HDCP_STATUS_AN_READY,
                                    HDCP_STATUS_AN_READY, 1)) {
                DRM_ERROR("Timed out waiting for An\n");
                return -ETIMEDOUT;
        }

        an.reg[0] = I915_READ(PORT_HDCP_ANLO(port));
        an.reg[1] = I915_READ(PORT_HDCP_ANHI(port));
        ret = shim->write_an_aksv(intel_dig_port, an.shim);
        if (ret)
                return ret;

        r0_prime_gen_start = jiffies;

        memset(&bksv, 0, sizeof(bksv));

        ret = intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv.shim);
        if (ret < 0)
                return ret;

        I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]);
        I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]);

        ret = shim->repeater_present(intel_dig_port, &repeater_present);
        if (ret)
                return ret;
        if (repeater_present)
                I915_WRITE(HDCP_REP_CTL,
                           intel_hdcp_get_repeater_ctl(intel_dig_port));

        ret = shim->toggle_signalling(intel_dig_port, true);
        if (ret)
                return ret;

        I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_AUTH_AND_ENC);

        /* Wait for R0 ready */
        if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
                     (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
                DRM_ERROR("Timed out waiting for R0 ready\n");
                return -ETIMEDOUT;
        }

        /*
         * Wait for R0' to become available. The spec says 100ms from Aksv, but
         * some monitors can take longer than this. We'll set the timeout at
         * 300ms just to be sure.
         *
         * On DP, there's an R0_READY bit available but no such bit
         * exists on HDMI. Since the upper-bound is the same, we'll just do
         * the stupid thing instead of polling on one and not the other.
         */
        wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);

        tries = 3;

        /*
         * The DP HDCP spec mandates two more attempts to read Ri' in case of
         * a mismatch.
         */
        for (i = 0; i < tries; i++) {
                ri.reg = 0;
                ret = shim->read_ri_prime(intel_dig_port, ri.shim);
                if (ret)
                        return ret;
                I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg);

                /* Wait for Ri prime match */
                if (!wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
                    (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
                        break;
        }

        if (i == tries) {
                DRM_DEBUG_KMS("Timed out waiting for Ri prime match (%x)\n",
                              I915_READ(PORT_HDCP_STATUS(port)));
                return -ETIMEDOUT;
        }

        /* Wait for encryption confirmation */
        if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port),
                                    HDCP_STATUS_ENC, HDCP_STATUS_ENC,
                                    ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
                DRM_ERROR("Timed out waiting for encryption\n");
                return -ETIMEDOUT;
        }

        /*
         * XXX: If we have MST-connected devices, we need to enable encryption
         * on those as well.
         */

        if (repeater_present)
                return intel_hdcp_auth_downstream(intel_dig_port, shim);

        DRM_DEBUG_KMS("HDCP is enabled (no repeater present)\n");
        return 0;
}

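/* Stop HDCP1.4 encryption on the port and disable HDCP signalling to the sink */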
static int _intel_hdcp_disable(struct intel_connector *connector)
{
        struct intel_hdcp *hdcp = &connector->hdcp;
        struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
        struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
        enum port port = intel_dig_port->base.port;
        int ret;

        DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n",
                      connector->base.name, connector->base.base.id);

        hdcp->hdcp_encrypted = false;
        I915_WRITE(PORT_HDCP_CONF(port), 0);
        if (intel_wait_for_register(&dev_priv->uncore,
                                    PORT_HDCP_STATUS(port), ~0, 0,
                                    ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
                DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
                return -ETIMEDOUT;
        }

        ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
        if (ret) {
                DRM_ERROR("Failed to disable HDCP signalling\n");
                return ret;
        }

        DRM_DEBUG_KMS("HDCP is disabled\n");
        return 0;
}

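/*
 * Load the HDCP keys (retrying the load if needed) and run HDCP1.4
 * authentication, retrying the full authentication up to three times.
 */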
static int _intel_hdcp_enable(struct intel_connector *connector)
{
        struct intel_hdcp *hdcp = &connector->hdcp;
        struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
        int i, ret, tries = 3;

        DRM_DEBUG_KMS("[%s:%d] HDCP is being enabled...\n",
                      connector->base.name, connector->base.base.id);

        if (!hdcp_key_loadable(dev_priv)) {
                DRM_ERROR("HDCP key Load is not possible\n");
                return -ENXIO;
        }

        for (i = 0; i < KEY_LOAD_TRIES; i++) {
                ret = intel_hdcp_load_keys(dev_priv);
                if (!ret)
                        break;
                intel_hdcp_clear_keys(dev_priv);
        }
        if (ret) {
                DRM_ERROR("Could not load HDCP keys, (%d)\n", ret);
                return ret;
        }

        /* In case of authentication failures, the HDCP spec expects reauth. */
        for (i = 0; i < tries; i++) {
                ret = intel_hdcp_auth(conn_to_dig_port(connector), hdcp->shim);
                if (!ret) {
                        hdcp->hdcp_encrypted = true;
                        return 0;
                }

                DRM_DEBUG_KMS("HDCP Auth failure (%d)\n", ret);

                /* Ensure HDCP encryption and signalling are stopped. */
                _intel_hdcp_disable(connector);
        }

        DRM_DEBUG_KMS("HDCP authentication failed (%d tries/%d)\n", tries, ret);
        return ret;
}

static inline
struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
{
        return container_of(hdcp, struct intel_connector, hdcp);
}

/* Implements Part 3 of the HDCP authorization procedure */
static int intel_hdcp_check_link(struct intel_connector *connector)
{
        struct intel_hdcp *hdcp = &connector->hdcp;
        struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
        struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
        enum port port = intel_dig_port->base.port;
        int ret = 0;

        mutex_lock(&hdcp->mutex);

        /* Checking the link is only valid while HDCP1.4 is enabled */
        if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
            !hdcp->hdcp_encrypted) {
                ret = -EINVAL;
                goto out;
        }

        if (WARN_ON(!intel_hdcp_in_use(connector))) {
                DRM_ERROR("%s:%d HDCP link stopped encryption,%x\n",
                          connector->base.name, connector->base.base.id,
                          I915_READ(PORT_HDCP_STATUS(port)));
                ret = -ENXIO;
                hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
                schedule_work(&hdcp->prop_work);
                goto out;
        }

        if (hdcp->shim->check_link(intel_dig_port)) {
                if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
                        hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
                        schedule_work(&hdcp->prop_work);
                }
                goto out;
        }

        DRM_DEBUG_KMS("[%s:%d] HDCP link failed, retrying authentication\n",
                      connector->base.name, connector->base.base.id);

        ret = _intel_hdcp_disable(connector);
        if (ret) {
                DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
                hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
                schedule_work(&hdcp->prop_work);
                goto out;
        }

        ret = _intel_hdcp_enable(connector);
        if (ret) {
                DRM_ERROR("Failed to enable hdcp (%d)\n", ret);
                hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
                schedule_work(&hdcp->prop_work);
                goto out;
        }

out:
        mutex_unlock(&hdcp->mutex);
        return ret;
}

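/*
 * Worker that mirrors the current HDCP state into the connector's
 * content_protection property under the connection_mutex.
 */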
static void intel_hdcp_prop_work(struct work_struct *work)
{
        struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
                                               prop_work);
        struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
        struct drm_device *dev = connector->base.dev;
        struct drm_connector_state *state;

        drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
        mutex_lock(&hdcp->mutex);

        /*
         * This worker is only used to flip between ENABLED/DESIRED. Either of
         * those to UNDESIRED is handled by core. If value == UNDESIRED,
         * we're running just after hdcp has been disabled, so just exit.
         */
        if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
                state = connector->base.state;
                state->content_protection = hdcp->value;
        }

        mutex_unlock(&hdcp->mutex);
        drm_modeset_unlock(&dev->mode_config.connection_mutex);
}

bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
{
        /* PORT E doesn't have HDCP, and PORT F is disabled */
        return INTEL_GEN(dev_priv) >= 9 && port < PORT_E;
}

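/*
 * The hdcp2_* helpers below wrap the MEI (CSME firmware) component ops:
 * each takes hdcp_comp_mutex, checks that the component is bound, and
 * forwards a single HDCP2.2 protocol step to the firmware.
 */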
static int
hdcp2_prepare_ake_init(struct intel_connector *connector,
                       struct hdcp2_ake_init *ake_data)
{
        struct hdcp_port_data *data = &connector->hdcp.port_data;
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        struct i915_hdcp_comp_master *comp;
        int ret;

        mutex_lock(&dev_priv->hdcp_comp_mutex);
        comp = dev_priv->hdcp_master;

        if (!comp || !comp->ops) {
                mutex_unlock(&dev_priv->hdcp_comp_mutex);
                return -EINVAL;
        }

        ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
        if (ret)
                DRM_DEBUG_KMS("Prepare_ake_init failed. %d\n", ret);
        mutex_unlock(&dev_priv->hdcp_comp_mutex);

        return ret;
}

static int
hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
                                struct hdcp2_ake_send_cert *rx_cert,
                                bool *paired,
                                struct hdcp2_ake_no_stored_km *ek_pub_km,
                                size_t *msg_sz)
{
        struct hdcp_port_data *data = &connector->hdcp.port_data;
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        struct i915_hdcp_comp_master *comp;
        int ret;

        mutex_lock(&dev_priv->hdcp_comp_mutex);
        comp = dev_priv->hdcp_master;

        if (!comp || !comp->ops) {
                mutex_unlock(&dev_priv->hdcp_comp_mutex);
                return -EINVAL;
        }

        ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data,
                                                         rx_cert, paired,
                                                         ek_pub_km, msg_sz);
        if (ret < 0)
                DRM_DEBUG_KMS("Verify rx_cert failed. %d\n", ret);
        mutex_unlock(&dev_priv->hdcp_comp_mutex);

        return ret;
}

static int hdcp2_verify_hprime(struct intel_connector *connector,
                               struct hdcp2_ake_send_hprime *rx_hprime)
{
        struct hdcp_port_data *data = &connector->hdcp.port_data;
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        struct i915_hdcp_comp_master *comp;
        int ret;

        mutex_lock(&dev_priv->hdcp_comp_mutex);
        comp = dev_priv->hdcp_master;

        if (!comp || !comp->ops) {
                mutex_unlock(&dev_priv->hdcp_comp_mutex);
                return -EINVAL;
        }

        ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
        if (ret < 0)
                DRM_DEBUG_KMS("Verify hprime failed. %d\n", ret);
        mutex_unlock(&dev_priv->hdcp_comp_mutex);

        return ret;
}

static int
hdcp2_store_pairing_info(struct intel_connector *connector,
                         struct hdcp2_ake_send_pairing_info *pairing_info)
{
        struct hdcp_port_data *data = &connector->hdcp.port_data;
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        struct i915_hdcp_comp_master *comp;
        int ret;

        mutex_lock(&dev_priv->hdcp_comp_mutex);
        comp = dev_priv->hdcp_master;

        if (!comp || !comp->ops) {
                mutex_unlock(&dev_priv->hdcp_comp_mutex);
                return -EINVAL;
        }

        ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
        if (ret < 0)
                DRM_DEBUG_KMS("Store pairing info failed. %d\n", ret);
        mutex_unlock(&dev_priv->hdcp_comp_mutex);

        return ret;
}

static int
hdcp2_prepare_lc_init(struct intel_connector *connector,
                      struct hdcp2_lc_init *lc_init)
{
        struct hdcp_port_data *data = &connector->hdcp.port_data;
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        struct i915_hdcp_comp_master *comp;
        int ret;

        mutex_lock(&dev_priv->hdcp_comp_mutex);
        comp = dev_priv->hdcp_master;

        if (!comp || !comp->ops) {
                mutex_unlock(&dev_priv->hdcp_comp_mutex);
                return -EINVAL;
        }

        ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
        if (ret < 0)
                DRM_DEBUG_KMS("Prepare lc_init failed. %d\n", ret);
        mutex_unlock(&dev_priv->hdcp_comp_mutex);

        return ret;
}

static int
hdcp2_verify_lprime(struct intel_connector *connector,
                    struct hdcp2_lc_send_lprime *rx_lprime)
{
        struct hdcp_port_data *data = &connector->hdcp.port_data;
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        struct i915_hdcp_comp_master *comp;
        int ret;

        mutex_lock(&dev_priv->hdcp_comp_mutex);
        comp = dev_priv->hdcp_master;

        if (!comp || !comp->ops) {
                mutex_unlock(&dev_priv->hdcp_comp_mutex);
                return -EINVAL;
        }

        ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
        if (ret < 0)
                DRM_DEBUG_KMS("Verify L_Prime failed. %d\n", ret);
        mutex_unlock(&dev_priv->hdcp_comp_mutex);

        return ret;
}

static int hdcp2_prepare_skey(struct intel_connector *connector,
                              struct hdcp2_ske_send_eks *ske_data)
{
        struct hdcp_port_data *data = &connector->hdcp.port_data;
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        struct i915_hdcp_comp_master *comp;
        int ret;

        mutex_lock(&dev_priv->hdcp_comp_mutex);
        comp = dev_priv->hdcp_master;

        if (!comp || !comp->ops) {
                mutex_unlock(&dev_priv->hdcp_comp_mutex);
                return -EINVAL;
        }

        ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
        if (ret < 0)
                DRM_DEBUG_KMS("Get session key failed. %d\n", ret);
        mutex_unlock(&dev_priv->hdcp_comp_mutex);

        return ret;
}

static int
hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
                                      struct hdcp2_rep_send_receiverid_list
                                                                *rep_topology,
                                      struct hdcp2_rep_send_ack *rep_send_ack)
{
        struct hdcp_port_data *data = &connector->hdcp.port_data;
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        struct i915_hdcp_comp_master *comp;
        int ret;

        mutex_lock(&dev_priv->hdcp_comp_mutex);
        comp = dev_priv->hdcp_master;

        if (!comp || !comp->ops) {
                mutex_unlock(&dev_priv->hdcp_comp_mutex);
                return -EINVAL;
        }

        ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data,
                                                         rep_topology,
                                                         rep_send_ack);
        if (ret < 0)
                DRM_DEBUG_KMS("Verify rep topology failed. %d\n", ret);
        mutex_unlock(&dev_priv->hdcp_comp_mutex);

        return ret;
}

static int
hdcp2_verify_mprime(struct intel_connector *connector,
                    struct hdcp2_rep_stream_ready *stream_ready)
{
        struct hdcp_port_data *data = &connector->hdcp.port_data;
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        struct i915_hdcp_comp_master *comp;
        int ret;

        mutex_lock(&dev_priv->hdcp_comp_mutex);
        comp = dev_priv->hdcp_master;

        if (!comp || !comp->ops) {
                mutex_unlock(&dev_priv->hdcp_comp_mutex);
                return -EINVAL;
        }

        ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
        if (ret < 0)
                DRM_DEBUG_KMS("Verify mprime failed. %d\n", ret);
        mutex_unlock(&dev_priv->hdcp_comp_mutex);

        return ret;
}

static int hdcp2_authenticate_port(struct intel_connector *connector)
{
        struct hdcp_port_data *data = &connector->hdcp.port_data;
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        struct i915_hdcp_comp_master *comp;
        int ret;

        mutex_lock(&dev_priv->hdcp_comp_mutex);
        comp = dev_priv->hdcp_master;

        if (!comp || !comp->ops) {
                mutex_unlock(&dev_priv->hdcp_comp_mutex);
                return -EINVAL;
        }

        ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
        if (ret < 0)
                DRM_DEBUG_KMS("Enable hdcp auth failed. %d\n", ret);
        mutex_unlock(&dev_priv->hdcp_comp_mutex);

        return ret;
}

static int hdcp2_close_mei_session(struct intel_connector *connector)
{
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        struct i915_hdcp_comp_master *comp;
        int ret;

        mutex_lock(&dev_priv->hdcp_comp_mutex);
        comp = dev_priv->hdcp_master;

        if (!comp || !comp->ops) {
                mutex_unlock(&dev_priv->hdcp_comp_mutex);
                return -EINVAL;
        }

        ret = comp->ops->close_hdcp_session(comp->mei_dev,
                                            &connector->hdcp.port_data);
        mutex_unlock(&dev_priv->hdcp_comp_mutex);

        return ret;
}

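/* Tearing down the MEI session is all that is needed to deauthenticate */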
static int hdcp2_deauthenticate_port(struct intel_connector *connector)
{
        return hdcp2_close_mei_session(connector);
}

/* Authentication flow starts from here */
static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
{
        struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
        struct intel_hdcp *hdcp = &connector->hdcp;
        union {
                struct hdcp2_ake_init ake_init;
                struct hdcp2_ake_send_cert send_cert;
                struct hdcp2_ake_no_stored_km no_stored_km;
                struct hdcp2_ake_send_hprime send_hprime;
                struct hdcp2_ake_send_pairing_info pairing_info;
        } msgs;
        const struct intel_hdcp_shim *shim = hdcp->shim;
        size_t size;
        int ret;

        /* Init for seq_num */
        hdcp->seq_num_v = 0;
        hdcp->seq_num_m = 0;

        ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
        if (ret < 0)
                return ret;

        ret = shim->write_2_2_msg(intel_dig_port, &msgs.ake_init,
                                  sizeof(msgs.ake_init));
        if (ret < 0)
                return ret;

        ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_CERT,
                                 &msgs.send_cert, sizeof(msgs.send_cert));
        if (ret < 0)
                return ret;

        if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL)
                return -EINVAL;

        hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);

        /*
         * msgs.no_stored_km also holds the message that is sent when km is
         * already stored on the sink; the two messages share this union
         * member.
         */
        ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
                                              &hdcp->is_paired,
                                              &msgs.no_stored_km, &size);
        if (ret < 0)
                return ret;

        ret = shim->write_2_2_msg(intel_dig_port, &msgs.no_stored_km, size);
        if (ret < 0)
                return ret;

        ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_HPRIME,
                                 &msgs.send_hprime, sizeof(msgs.send_hprime));
        if (ret < 0)
                return ret;

        ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
        if (ret < 0)
                return ret;

        if (!hdcp->is_paired) {
                /* Pairing is required */
                ret = shim->read_2_2_msg(intel_dig_port,
                                         HDCP_2_2_AKE_SEND_PAIRING_INFO,
                                         &msgs.pairing_info,
                                         sizeof(msgs.pairing_info));
                if (ret < 0)
                        return ret;

                ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
                if (ret < 0)
                        return ret;
                hdcp->is_paired = true;
        }

        return 0;
}

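/* Locality check: LC_Init/L' exchange, retried up to HDCP2_LC_RETRY_CNT times */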
static int hdcp2_locality_check(struct intel_connector *connector)
{
        struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
        struct intel_hdcp *hdcp = &connector->hdcp;
        union {
                struct hdcp2_lc_init lc_init;
                struct hdcp2_lc_send_lprime send_lprime;
        } msgs;
        const struct intel_hdcp_shim *shim = hdcp->shim;
        int tries = HDCP2_LC_RETRY_CNT, ret, i;

        for (i = 0; i < tries; i++) {
                ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
                if (ret < 0)
                        continue;

                ret = shim->write_2_2_msg(intel_dig_port, &msgs.lc_init,
                                          sizeof(msgs.lc_init));
                if (ret < 0)
                        continue;

                ret = shim->read_2_2_msg(intel_dig_port,
                                         HDCP_2_2_LC_SEND_LPRIME,
                                         &msgs.send_lprime,
                                         sizeof(msgs.send_lprime));
                if (ret < 0)
                        continue;

                ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
                if (!ret)
                        break;
        }

        return ret;
}

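/* SKE: fetch the encrypted session key from the firmware and send it to the sink */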
static int hdcp2_session_key_exchange(struct intel_connector *connector)
{
        struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
        struct intel_hdcp *hdcp = &connector->hdcp;
        struct hdcp2_ske_send_eks send_eks;
        int ret;

        ret = hdcp2_prepare_skey(connector, &send_eks);
        if (ret < 0)
                return ret;

        ret = hdcp->shim->write_2_2_msg(intel_dig_port, &send_eks,
                                        sizeof(send_eks));
        if (ret < 0)
                return ret;

        return 0;
}

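/* RepeaterAuth_Stream_Manage / Stream_Ready exchange for the single transmitted stream */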
static
int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
        struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
        struct intel_hdcp *hdcp = &connector->hdcp;
        union {
                struct hdcp2_rep_stream_manage stream_manage;
                struct hdcp2_rep_stream_ready stream_ready;
        } msgs;
        const struct intel_hdcp_shim *shim = hdcp->shim;
        int ret;

        /* Prepare RepeaterAuth_Stream_Manage msg */
        msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
        drm_hdcp2_u32_to_seq_num(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);

        /* K, the number of streams, is fixed at 1 and stored big-endian */
        msgs.stream_manage.k = cpu_to_be16(1);

        /* The stream ID is 0x0 for HDMI as well as for DP SST */
        msgs.stream_manage.streams[0].stream_id = 0;
        msgs.stream_manage.streams[0].stream_type = hdcp->content_type;

        /* Send it to Repeater */
        ret = shim->write_2_2_msg(intel_dig_port, &msgs.stream_manage,
                                  sizeof(msgs.stream_manage));
        if (ret < 0)
                return ret;

        ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_STREAM_READY,
                                 &msgs.stream_ready, sizeof(msgs.stream_ready));
        if (ret < 0)
                return ret;

        hdcp->port_data.seq_num_m = hdcp->seq_num_m;
        hdcp->port_data.streams[0].stream_type = hdcp->content_type;

        ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);
        if (ret < 0)
                return ret;

        hdcp->seq_num_m++;

        if (hdcp->seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
                DRM_DEBUG_KMS("seq_num_m roll over.\n");
                return -1;
        }

        return 0;
}

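/* Process the repeater's receiver ID list and send back the firmware-computed ack */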
static
int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
{
        struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
        struct intel_hdcp *hdcp = &connector->hdcp;
        union {
                struct hdcp2_rep_send_receiverid_list recvid_list;
                struct hdcp2_rep_send_ack rep_ack;
        } msgs;
        const struct intel_hdcp_shim *shim = hdcp->shim;
        u8 *rx_info;
        u32 seq_num_v;
        int ret;

        ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
                                 &msgs.recvid_list, sizeof(msgs.recvid_list));
        if (ret < 0)
                return ret;

        rx_info = msgs.recvid_list.rx_info;

        if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
            HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
                DRM_DEBUG_KMS("Topology Max Size Exceeded\n");
                return -EINVAL;
        }

        /* Convert seq_num_v to a u32 and keep it in a local variable */
        seq_num_v = drm_hdcp2_seq_num_to_u32(msgs.recvid_list.seq_num_v);

        if (seq_num_v < hdcp->seq_num_v) {
                /* Roll over of the seq_num_v from repeater. Reauthenticate. */
                DRM_DEBUG_KMS("Seq_num_v roll over.\n");
                return -EINVAL;
        }

        ret = hdcp2_verify_rep_topology_prepare_ack(connector,
                                                    &msgs.recvid_list,
                                                    &msgs.rep_ack);
        if (ret < 0)
                return ret;

        hdcp->seq_num_v = seq_num_v;
        ret = shim->write_2_2_msg(intel_dig_port, &msgs.rep_ack,
                                  sizeof(msgs.rep_ack));
        if (ret < 0)
                return ret;

        return 0;
}

static int hdcp2_authenticate_repeater(struct intel_connector *connector)
{
        int ret;

        ret = hdcp2_authenticate_repeater_topology(connector);
        if (ret < 0)
                return ret;

        return hdcp2_propagate_stream_management_info(connector);
}

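/*
 * Full HDCP2.2 authentication: AKE, locality check, SKE, optional repeater
 * authentication, then hand the session over to the firmware/HW.
 */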
1406 static int hdcp2_authenticate_sink(struct intel_connector *connector)
1407 {
1408         struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
1409         struct intel_hdcp *hdcp = &connector->hdcp;
1410         const struct intel_hdcp_shim *shim = hdcp->shim;
1411         int ret;
1412
1413         ret = hdcp2_authentication_key_exchange(connector);
1414         if (ret < 0) {
1415                 DRM_DEBUG_KMS("AKE Failed. Err : %d\n", ret);
1416                 return ret;
1417         }
1418
1419         ret = hdcp2_locality_check(connector);
1420         if (ret < 0) {
1421                 DRM_DEBUG_KMS("Locality Check failed. Err : %d\n", ret);
1422                 return ret;
1423         }
1424
1425         ret = hdcp2_session_key_exchange(connector);
1426         if (ret < 0) {
1427                 DRM_DEBUG_KMS("SKE Failed. Err : %d\n", ret);
1428                 return ret;
1429         }
1430
1431         if (shim->config_stream_type) {
1432                 ret = shim->config_stream_type(intel_dig_port,
1433                                                hdcp->is_repeater,
1434                                                hdcp->content_type);
1435                 if (ret < 0)
1436                         return ret;
1437         }
1438
1439         if (hdcp->is_repeater) {
1440                 ret = hdcp2_authenticate_repeater(connector);
1441                 if (ret < 0) {
1442                         DRM_DEBUG_KMS("Repeater Auth Failed. Err: %d\n", ret);
1443                         return ret;
1444                 }
1445         }
1446
1447         hdcp->port_data.streams[0].stream_type = hdcp->content_type;
1448         return hdcp2_authenticate_port(connector);
1453 }
1454
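/*
 * Request link encryption on an already authenticated port and wait for the
 * hardware to report LINK_ENCRYPTION_STATUS, toggling HDCP signalling first
 * where the shim requires it.
 */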
1455 static int hdcp2_enable_encryption(struct intel_connector *connector)
1456 {
1457         struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
1458         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1459         struct intel_hdcp *hdcp = &connector->hdcp;
1460         enum port port = connector->encoder->port;
1461         int ret;
1462
1463         WARN_ON(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS);
1464
1465         if (hdcp->shim->toggle_signalling) {
1466                 ret = hdcp->shim->toggle_signalling(intel_dig_port, true);
1467                 if (ret) {
1468                         DRM_ERROR("Failed to enable HDCP signalling. %d\n",
1469                                   ret);
1470                         return ret;
1471                 }
1472         }
1473
1474         if (I915_READ(HDCP2_STATUS_DDI(port)) & LINK_AUTH_STATUS) {
1475                 /* Link is Authenticated. Now set for Encryption */
1476                 I915_WRITE(HDCP2_CTL_DDI(port),
1477                            I915_READ(HDCP2_CTL_DDI(port)) |
1478                            CTL_LINK_ENCRYPTION_REQ);
1479         }
1480
1481         ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port),
1482                                       LINK_ENCRYPTION_STATUS,
1483                                       LINK_ENCRYPTION_STATUS,
1484                                       ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
1485
1486         return ret;
1487 }
1488
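/*
 * Drop the link encryption request, wait for the encryption status to clear
 * and then disable HDCP signalling where the shim provides a hook for it.
 */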
1489 static int hdcp2_disable_encryption(struct intel_connector *connector)
1490 {
1491         struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
1492         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1493         struct intel_hdcp *hdcp = &connector->hdcp;
1494         enum port port = connector->encoder->port;
1495         int ret;
1496
1497         WARN_ON(!(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS));
1498
1499         I915_WRITE(HDCP2_CTL_DDI(port),
1500                    I915_READ(HDCP2_CTL_DDI(port)) & ~CTL_LINK_ENCRYPTION_REQ);
1501
1502         ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port),
1503                                       LINK_ENCRYPTION_STATUS, 0x0,
1504                                       ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
1505         if (ret == -ETIMEDOUT)
1506                 DRM_DEBUG_KMS("Disable encryption timed out\n");
1507
1508         if (hdcp->shim->toggle_signalling) {
1509                 ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
1510                 if (ret) {
1511                         DRM_ERROR("Failed to disable HDCP signalling. %d\n",
1512                                   ret);
1513                         return ret;
1514                 }
1515         }
1516
1517         return ret;
1518 }
1519
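/*
 * Attempt the full HDCP2.2 authentication up to three times, deauthenticating
 * the port between attempts, and enable encryption once an attempt succeeds.
 */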
1520 static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
1521 {
1522         int ret, i, tries = 3;
1523
1524         for (i = 0; i < tries; i++) {
1525                 ret = hdcp2_authenticate_sink(connector);
1526                 if (!ret)
1527                         break;
1528
1529                 /* Clearing the mei hdcp session */
1530                 DRM_DEBUG_KMS("HDCP2.2 Auth %d of %d Failed.(%d)\n",
1531                               i + 1, tries, ret);
1532                 if (hdcp2_deauthenticate_port(connector) < 0)
1533                         DRM_DEBUG_KMS("Port deauth failed.\n");
1534         }
1535
1536         if (i != tries) {
1537                 /*
1538                  * Ensure the required minimum 200 ms interval between the
1539                  * Session Key Exchange and enabling encryption.
1540                  */
1541                 msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
1542                 ret = hdcp2_enable_encryption(connector);
1543                 if (ret < 0) {
1544                         DRM_DEBUG_KMS("Encryption Enable Failed.(%d)\n", ret);
1545                         if (hdcp2_deauthenticate_port(connector) < 0)
1546                                 DRM_DEBUG_KMS("Port deauth failed.\n");
1547                 }
1548         }
1549
1550         return ret;
1551 }
1552
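/*
 * Top level HDCP2.2 enable for a connector: authenticate, enable encryption
 * and mark the connector as hdcp2_encrypted.
 */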
1553 static int _intel_hdcp2_enable(struct intel_connector *connector)
1554 {
1555         struct intel_hdcp *hdcp = &connector->hdcp;
1556         int ret;
1557
1558         DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
1559                       connector->base.name, connector->base.base.id,
1560                       hdcp->content_type);
1561
1562         ret = hdcp2_authenticate_and_encrypt(connector);
1563         if (ret) {
1564                 DRM_DEBUG_KMS("HDCP2 Type%d Enabling Failed. (%d)\n",
1565                               hdcp->content_type, ret);
1566                 return ret;
1567         }
1568
1569         DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is enabled. Type %d\n",
1570                       connector->base.name, connector->base.base.id,
1571                       hdcp->content_type);
1572
1573         hdcp->hdcp2_encrypted = true;
1574         return 0;
1575 }
1576
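/* Disable HDCP2.2 encryption and deauthenticate the port. */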
1577 static int _intel_hdcp2_disable(struct intel_connector *connector)
1578 {
1579         int ret;
1580
1581         DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being Disabled\n",
1582                       connector->base.name, connector->base.base.id);
1583
1584         ret = hdcp2_disable_encryption(connector);
1585
1586         if (hdcp2_deauthenticate_port(connector) < 0)
1587                 DRM_DEBUG_KMS("Port deauth failed.\n");
1588
1589         connector->hdcp.hdcp2_encrypted = false;
1590
1591         return ret;
1592 }
1593
1594 /* Implements the Link Integrity Check for HDCP2.2 */
1595 static int intel_hdcp2_check_link(struct intel_connector *connector)
1596 {
1597         struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
1598         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1599         struct intel_hdcp *hdcp = &connector->hdcp;
1600         enum port port = connector->encoder->port;
1601         int ret = 0;
1602
1603         mutex_lock(&hdcp->mutex);
1604
1605         /* A link check is expected only while HDCP2.2 is enabled */
1606         if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
1607             !hdcp->hdcp2_encrypted) {
1608                 ret = -EINVAL;
1609                 goto out;
1610         }
1611
1612         if (WARN_ON(!intel_hdcp2_in_use(connector))) {
1613                 DRM_ERROR("HDCP2.2 link stopped the encryption, %x\n",
1614                           I915_READ(HDCP2_STATUS_DDI(port)));
1615                 ret = -ENXIO;
1616                 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
1617                 schedule_work(&hdcp->prop_work);
1618                 goto out;
1619         }
1620
1621         ret = hdcp->shim->check_2_2_link(intel_dig_port);
1622         if (ret == HDCP_LINK_PROTECTED) {
1623                 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
1624                         hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
1625                         schedule_work(&hdcp->prop_work);
1626                 }
1627                 goto out;
1628         }
1629
1630         if (ret == HDCP_TOPOLOGY_CHANGE) {
1631                 if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
1632                         goto out;
1633
1634                 DRM_DEBUG_KMS("HDCP2.2 Downstream topology change\n");
1635                 ret = hdcp2_authenticate_repeater_topology(connector);
1636                 if (!ret) {
1637                         hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
1638                         schedule_work(&hdcp->prop_work);
1639                         goto out;
1640                 }
1641                 DRM_DEBUG_KMS("[%s:%d] Repeater topology auth failed.(%d)\n",
1642                               connector->base.name, connector->base.base.id,
1643                               ret);
1644         } else {
1645                 DRM_DEBUG_KMS("[%s:%d] HDCP2.2 link failed, retrying auth\n",
1646                               connector->base.name, connector->base.base.id);
1647         }
1648
1649         ret = _intel_hdcp2_disable(connector);
1650         if (ret) {
1651                 DRM_ERROR("[%s:%d] Failed to disable hdcp2.2 (%d)\n",
1652                           connector->base.name, connector->base.base.id, ret);
1653                 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
1654                 schedule_work(&hdcp->prop_work);
1655                 goto out;
1656         }
1657
1658         ret = _intel_hdcp2_enable(connector);
1659         if (ret) {
1660                 DRM_DEBUG_KMS("[%s:%d] Failed to enable hdcp2.2 (%d)\n",
1661                               connector->base.name, connector->base.base.id,
1662                               ret);
1663                 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
1664                 schedule_work(&hdcp->prop_work);
1665                 goto out;
1666         }
1667
1668 out:
1669         mutex_unlock(&hdcp->mutex);
1670         return ret;
1671 }
1672
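/*
 * Periodic link check worker: the HDCP2.2 check runs first and, if it fails
 * (e.g. HDCP2.2 is not the active version), the HDCP1.4 check is tried. The
 * work re-arms itself with the check period of whichever version succeeded.
 */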
1673 static void intel_hdcp_check_work(struct work_struct *work)
1674 {
1675         struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
1676                                                struct intel_hdcp,
1677                                                check_work);
1678         struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
1679
1680         if (!intel_hdcp2_check_link(connector))
1681                 schedule_delayed_work(&hdcp->check_work,
1682                                       DRM_HDCP2_CHECK_PERIOD_MS);
1683         else if (!intel_hdcp_check_link(connector))
1684                 schedule_delayed_work(&hdcp->check_work,
1685                                       DRM_HDCP_CHECK_PERIOD_MS);
1686 }
1687
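/*
 * Component bind callback, run once the MEI HDCP driver is up: stash the
 * mei hdcp component master and the MEI device so that HDCP2.2 messages can
 * be exchanged with the ME firmware.
 */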
1688 static int i915_hdcp_component_bind(struct device *i915_kdev,
1689                                     struct device *mei_kdev, void *data)
1690 {
1691         struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
1692
1693         DRM_DEBUG("I915 HDCP comp bind\n");
1694         mutex_lock(&dev_priv->hdcp_comp_mutex);
1695         dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data;
1696         dev_priv->hdcp_master->mei_dev = mei_kdev;
1697         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1698
1699         return 0;
1700 }
1701
1702 static void i915_hdcp_component_unbind(struct device *i915_kdev,
1703                                        struct device *mei_kdev, void *data)
1704 {
1705         struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
1706
1707         DRM_DEBUG("I915 HDCP comp unbind\n");
1708         mutex_lock(&dev_priv->hdcp_comp_mutex);
1709         dev_priv->hdcp_master = NULL;
1710         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1711 }
1712
1713 static const struct component_ops i915_hdcp_component_ops = {
1714         .bind   = i915_hdcp_component_bind,
1715         .unbind = i915_hdcp_component_unbind,
1716 };
1717
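/*
 * Fill in the hdcp_port_data passed to the MEI HDCP component: port, port
 * type, protocol and a single stream carrying the requested content type.
 */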
1718 static inline int initialize_hdcp_port_data(struct intel_connector *connector)
1719 {
1720         struct intel_hdcp *hdcp = &connector->hdcp;
1721         struct hdcp_port_data *data = &hdcp->port_data;
1722
1723         data->port = connector->encoder->port;
1724         data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
1725         data->protocol = (u8)hdcp->shim->protocol;
1726
1727         data->k = 1;
1728         if (!data->streams)
1729                 data->streams = kcalloc(data->k,
1730                                         sizeof(struct hdcp2_streamid_type),
1731                                         GFP_KERNEL);
1732         if (!data->streams) {
1733                 DRM_ERROR("Out of Memory\n");
1734                 return -ENOMEM;
1735         }
1736
1737         data->streams[0].stream_id = 0;
1738         data->streams[0].stream_type = hdcp->content_type;
1739
1740         return 0;
1741 }
1742
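/*
 * HDCP2.2 support needs the MEI HDCP component driver and is limited to
 * Gen10+, Geminilake and Kabylake platforms.
 */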
1743 static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
1744 {
1745         if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
1746                 return false;
1747
1748         return (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) ||
1749                 IS_KABYLAKE(dev_priv));
1750 }
1751
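/*
 * Register i915 as an I915_COMPONENT_HDCP component so the MEI HDCP driver
 * can bind to it; on failure hdcp_comp_added is cleared and HDCP2.2 simply
 * remains unused.
 */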
1752 void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
1753 {
1754         int ret;
1755
1756         if (!is_hdcp2_supported(dev_priv))
1757                 return;
1758
1759         mutex_lock(&dev_priv->hdcp_comp_mutex);
1760         WARN_ON(dev_priv->hdcp_comp_added);
1761
1762         dev_priv->hdcp_comp_added = true;
1763         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1764         ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
1765                                   I915_COMPONENT_HDCP);
1766         if (ret < 0) {
1767                 DRM_DEBUG_KMS("Failed at component add(%d)\n", ret);
1768                 mutex_lock(&dev_priv->hdcp_comp_mutex);
1769                 dev_priv->hdcp_comp_added = false;
1770                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1771                 return;
1772         }
1773 }
1774
1775 static void intel_hdcp2_init(struct intel_connector *connector)
1776 {
1777         struct intel_hdcp *hdcp = &connector->hdcp;
1778         int ret;
1779
1780         ret = initialize_hdcp_port_data(connector);
1781         if (ret) {
1782                 DRM_DEBUG_KMS("Mei hdcp data init failed\n");
1783                 return;
1784         }
1785
1786         hdcp->hdcp2_supported = true;
1787 }
1788
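/*
 * Initialize the HDCP state for a connector. Encoder code is expected to call
 * this with its intel_hdcp_shim when the connector is created; HDCP2.2 state
 * is additionally set up when the platform supports it.
 *
 * A minimal usage sketch (illustrative only; the shim name below is
 * hypothetical):
 *
 *	intel_hdcp_init(connector, &my_encoder_hdcp_shim);
 *	...
 *	intel_hdcp_enable(connector);	from the encoder enable path
 *	intel_hdcp_disable(connector);	from the encoder disable path
 */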
1789 int intel_hdcp_init(struct intel_connector *connector,
1790                     const struct intel_hdcp_shim *shim)
1791 {
1792         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1793         struct intel_hdcp *hdcp = &connector->hdcp;
1794         int ret;
1795
1796         if (!shim)
1797                 return -EINVAL;
1798
1799         ret = drm_connector_attach_content_protection_property(&connector->base);
1800         if (ret)
1801                 return ret;
1802
1803         hdcp->shim = shim;
1804         mutex_init(&hdcp->mutex);
1805         INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
1806         INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
1807
1808         if (is_hdcp2_supported(dev_priv))
1809                 intel_hdcp2_init(connector);
1810         init_waitqueue_head(&hdcp->cp_irq_queue);
1811
1812         return 0;
1813 }
1814
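/*
 * Enable content protection on a connector: HDCP2.2 is tried first when both
 * the platform and the sink are capable, with HDCP1.4 as the fallback, and a
 * periodic link check is scheduled on success.
 */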
1815 int intel_hdcp_enable(struct intel_connector *connector)
1816 {
1817         struct intel_hdcp *hdcp = &connector->hdcp;
1818         unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
1819         int ret = -EINVAL;
1820
1821         if (!hdcp->shim)
1822                 return -ENOENT;
1823
1824         mutex_lock(&hdcp->mutex);
1825         WARN_ON(hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
1826
1827         /*
1828          * HDCP2.2 is considered more secure than HDCP1.4, so if both the
1829          * platform and the sink are HDCP2.2 capable, prefer HDCP2.2.
1830          */
1831         if (intel_hdcp2_capable(connector)) {
1832                 ret = _intel_hdcp2_enable(connector);
1833                 if (!ret)
1834                         check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
1835         }
1836
1837         /* When HDCP2.2 fails, HDCP1.4 will be attempted */
1838         if (ret && intel_hdcp_capable(connector))
1839                 ret = _intel_hdcp_enable(connector);
1841
1842         if (!ret) {
1843                 schedule_delayed_work(&hdcp->check_work, check_link_interval);
1844                 hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
1845                 schedule_work(&hdcp->prop_work);
1846         }
1847
1848         mutex_unlock(&hdcp->mutex);
1849         return ret;
1850 }
1851
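/*
 * Disable whichever HDCP version is currently encrypting the link and cancel
 * the periodic link check.
 */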
1852 int intel_hdcp_disable(struct intel_connector *connector)
1853 {
1854         struct intel_hdcp *hdcp = &connector->hdcp;
1855         int ret = 0;
1856
1857         if (!hdcp->shim)
1858                 return -ENOENT;
1859
1860         mutex_lock(&hdcp->mutex);
1861
1862         if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
1863                 hdcp->value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
1864                 if (hdcp->hdcp2_encrypted)
1865                         ret = _intel_hdcp2_disable(connector);
1866                 else if (hdcp->hdcp_encrypted)
1867                         ret = _intel_hdcp_disable(connector);
1868         }
1869
1870         mutex_unlock(&hdcp->mutex);
1871         cancel_delayed_work_sync(&hdcp->check_work);
1872         return ret;
1873 }
1874
1875 void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
1876 {
1877         mutex_lock(&dev_priv->hdcp_comp_mutex);
1878         if (!dev_priv->hdcp_comp_added) {
1879                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1880                 return;
1881         }
1882
1883         dev_priv->hdcp_comp_added = false;
1884         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1885
1886         component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
1887 }
1888
1889 void intel_hdcp_cleanup(struct intel_connector *connector)
1890 {
1891         if (!connector->hdcp.shim)
1892                 return;
1893
1894         mutex_lock(&connector->hdcp.mutex);
1895         kfree(connector->hdcp.port_data.streams);
1896         mutex_unlock(&connector->hdcp.mutex);
1897 }
1898
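/*
 * Fold content protection property changes into the atomic state: downgrade
 * ENABLED to DESIRED when the connector is being disabled, and mark the crtc
 * state as mode_changed for any other real change so that the commit
 * (re)enables HDCP.
 */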
1899 void intel_hdcp_atomic_check(struct drm_connector *connector,
1900                              struct drm_connector_state *old_state,
1901                              struct drm_connector_state *new_state)
1902 {
1903         u64 old_cp = old_state->content_protection;
1904         u64 new_cp = new_state->content_protection;
1905         struct drm_crtc_state *crtc_state;
1906
1907         if (!new_state->crtc) {
1908                 /*
1909                  * If the connector is being disabled with CP enabled, mark it
1910                  * desired so it's re-enabled when the connector is brought back
1911                  */
1912                 if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
1913                         new_state->content_protection =
1914                                 DRM_MODE_CONTENT_PROTECTION_DESIRED;
1915                 return;
1916         }
1917
1918         /*
1919          * Nothing to do if the state didn't change, or HDCP was activated since
1920          * the last commit
1921          */
1922         if (old_cp == new_cp ||
1923             (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
1924              new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED))
1925                 return;
1926
1927         crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
1928                                                    new_state->crtc);
1929         crtc_state->mode_changed = true;
1930 }
1931
1932 /* Handles the CP_IRQ raised from the DP HDCP sink */
1933 void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
1934 {
1935         struct intel_hdcp *hdcp = &connector->hdcp;
1936
1937         if (!hdcp->shim)
1938                 return;
1939
1940         atomic_inc(&connector->hdcp.cp_irq_count);
1941         wake_up_all(&connector->hdcp.cp_irq_queue);
1942
1943         schedule_delayed_work(&hdcp->check_work, 0);
1944 }