// SPDX-License-Identifier: MIT
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "amdgpu_dm_hdcp.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "dc_fused_io.h"
#include "dm_helpers.h"
#include <drm/display/drm_hdcp_helper.h>
#include "hdcp_psp.h"

/*
 * If the SRM version being loaded is less than or equal to the
 * currently loaded SRM, psp will return 0xFFFF as the version
 */
#define PSP_SRM_VERSION_MAX 0xFFFF

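/*
 * Low-level I2C write helper used by mod_hdcp as its DDC write callback:
 * wraps the payload in a single hardware-engine i2c_command and submits it
 * through the DM helpers.
 */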
static bool
lp_write_i2c(void *handle, uint32_t address, const uint8_t *data, uint32_t size)
{
	struct dc_link *link = handle;
	struct i2c_payload i2c_payloads[] = {{true, address, size, (void *)data} };
	struct i2c_command cmd = {i2c_payloads, 1, I2C_COMMAND_ENGINE_HW,
				  link->dc->caps.i2c_speed_in_khz};

	return dm_helpers_submit_i2c(link->ctx, link, &cmd);
}

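/*
 * Low-level I2C read helper: writes the register offset, then reads back the
 * requested number of bytes from @address in one two-payload i2c_command.
 */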
static bool
lp_read_i2c(void *handle, uint32_t address, uint8_t offset, uint8_t *data, uint32_t size)
{
	struct dc_link *link = handle;

	struct i2c_payload i2c_payloads[] = {{true, address, 1, &offset},
					     {false, address, size, data} };
	struct i2c_command cmd = {i2c_payloads, 2, I2C_COMMAND_ENGINE_HW,
				  link->dc->caps.i2c_speed_in_khz};

	return dm_helpers_submit_i2c(link->ctx, link, &cmd);
}

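/* DPCD (DisplayPort AUX) write/read helpers, forwarded to the DM helpers. */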
static bool
lp_write_dpcd(void *handle, uint32_t address, const uint8_t *data, uint32_t size)
{
	struct dc_link *link = handle;

	return dm_helpers_dp_write_dpcd(link->ctx, link, address, data, size);
}

static bool
lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size)
{
	struct dc_link *link = handle;

	return dm_helpers_dp_read_dpcd(link->ctx, link, address, data, size);
}

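/*
 * Forward mod_hdcp's atomic write/poll/read I2C operation to the DM helper.
 */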
static bool lp_atomic_write_poll_read_i2c(
	void *handle,
	const struct mod_hdcp_atomic_op_i2c *write,
	const struct mod_hdcp_atomic_op_i2c *poll,
	struct mod_hdcp_atomic_op_i2c *read,
	uint32_t poll_timeout_us,
	uint8_t poll_mask_msb
)
{
	struct dc_link *link = handle;

	return dm_atomic_write_poll_read_i2c(link, write, poll, read, poll_timeout_us, poll_mask_msb);
}

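/* AUX variant of the atomic write/poll/read operation. */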
static bool lp_atomic_write_poll_read_aux(
	void *handle,
	const struct mod_hdcp_atomic_op_aux *write,
	const struct mod_hdcp_atomic_op_aux *poll,
	struct mod_hdcp_atomic_op_aux *read,
	uint32_t poll_timeout_us,
	uint8_t poll_mask_msb
)
{
	struct dc_link *link = handle;

	return dm_atomic_write_poll_read_aux(link, write, poll, read, poll_timeout_us, poll_mask_msb);
}

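/*
 * Read the SRM currently held by the PSP HDCP TA. Returns a pointer into the
 * TA shared buffer and fills in the SRM version and size, or NULL on failure.
 */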
static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint32_t *srm_size)
{
	struct ta_hdcp_shared_memory *hdcp_cmd;

	if (!psp->hdcp_context.context.initialized) {
		DRM_WARN("Failed to get hdcp srm. HDCP TA is not initialized.");
		return NULL;
	}

	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));

	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP_GET_SRM;
	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);

	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
		return NULL;

	*srm_version = hdcp_cmd->out_msg.hdcp_get_srm.srm_version;
	*srm_size = hdcp_cmd->out_msg.hdcp_get_srm.srm_buf_size;

	return hdcp_cmd->out_msg.hdcp_get_srm.srm_buf;
}

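/*
 * Hand an SRM blob to the PSP HDCP TA. Returns 0 and the version the TA
 * reports back on success, -EINVAL on failure or if the TA rejects the blob.
 */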
static int psp_set_srm(struct psp_context *psp,
		       u8 *srm, uint32_t srm_size, uint32_t *srm_version)
{
	struct ta_hdcp_shared_memory *hdcp_cmd;

	if (!psp->hdcp_context.context.initialized) {
		DRM_WARN("Failed to set hdcp srm. HDCP TA is not initialized.");
		return -EINVAL;
	}

	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));

	memcpy(hdcp_cmd->in_msg.hdcp_set_srm.srm_buf, srm, srm_size);
	hdcp_cmd->in_msg.hdcp_set_srm.srm_buf_size = srm_size;
	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP_SET_SRM;

	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);

	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS ||
	    hdcp_cmd->out_msg.hdcp_set_srm.valid_signature != 1 ||
	    hdcp_cmd->out_msg.hdcp_set_srm.srm_version == PSP_SRM_VERSION_MAX)
		return -EINVAL;

	*srm_version = hdcp_cmd->out_msg.hdcp_set_srm.srm_version;
	return 0;
}

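/*
 * Apply the callback/watchdog scheduling decisions that mod_hdcp left in
 * hdcp_work->output, then kick a property re-validation.
 */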
static void process_output(struct hdcp_workqueue *hdcp_work)
{
	struct mod_hdcp_output output = hdcp_work->output;

	if (output.callback_stop)
		cancel_delayed_work(&hdcp_work->callback_dwork);

	if (output.callback_needed)
		schedule_delayed_work(&hdcp_work->callback_dwork,
				      msecs_to_jiffies(output.callback_delay));

	if (output.watchdog_timer_stop)
		cancel_delayed_work(&hdcp_work->watchdog_timer_dwork);

	if (output.watchdog_timer_needed)
		schedule_delayed_work(&hdcp_work->watchdog_timer_dwork,
				      msecs_to_jiffies(output.watchdog_timer_delay));

	schedule_delayed_work(&hdcp_work->property_validate_dwork, msecs_to_jiffies(0));
}

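/* Lock or unlock the per-link mutex of every entry in the workqueue array. */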
static void link_lock(struct hdcp_workqueue *work, bool lock)
{
	int i = 0;

	for (i = 0; i < work->max_link; i++) {
		if (lock)
			mutex_lock(&work[i].mutex);
		else
			mutex_unlock(&work[i].mutex);
	}
}

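/*
 * Enable or disable HDCP for one display on the given link: set the saved
 * SRM if needed, pick the HDCP 1.x/2.x adjustments for the requested content
 * type and push the update into mod_hdcp.
 */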
void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
			 unsigned int link_index,
			 struct amdgpu_dm_connector *aconnector,
			 u8 content_type,
			 bool enable_encryption)
{
	struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
	struct mod_hdcp_link_adjustment link_adjust;
	struct mod_hdcp_display_adjustment display_adjust;
	unsigned int conn_index = aconnector->base.index;
	const struct dc *dc = aconnector->dc_link->dc;

	guard(mutex)(&hdcp_w->mutex);
	drm_connector_get(&aconnector->base);
	if (hdcp_w->aconnector[conn_index])
		drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
	hdcp_w->aconnector[conn_index] = aconnector;

	memset(&link_adjust, 0, sizeof(link_adjust));
	memset(&display_adjust, 0, sizeof(display_adjust));

	if (enable_encryption) {
		/* Explicitly set the saved SRM, as the sysfs call will come
		 * after we have already enabled HDCP (S3 resume case)
		 */
		if (hdcp_work->srm_size > 0)
			psp_set_srm(hdcp_work->hdcp.config.psp.handle, hdcp_work->srm,
				    hdcp_work->srm_size,
				    &hdcp_work->srm_version);

		display_adjust.disable = MOD_HDCP_DISPLAY_NOT_DISABLE;

		link_adjust.auth_delay = 2;
		link_adjust.retry_limit = MAX_NUM_OF_ATTEMPTS;

		if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) {
			link_adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
		} else if (content_type == DRM_MODE_HDCP_CONTENT_TYPE1) {
			link_adjust.hdcp1.disable = 1;
			link_adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_1;
		}
		link_adjust.hdcp2.use_fw_locality_check =
			(dc->caps.fused_io_supported || dc->debug.hdcp_lc_force_fw_enable);
		link_adjust.hdcp2.use_sw_locality_fallback = dc->debug.hdcp_lc_enable_sw_fallback;

		schedule_delayed_work(&hdcp_w->property_validate_dwork,
				      msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));
	} else {
		display_adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION;
		hdcp_w->encryption_status[conn_index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
		cancel_delayed_work(&hdcp_w->property_validate_dwork);
	}

	mod_hdcp_update_display(&hdcp_w->hdcp, conn_index, &link_adjust, &display_adjust, &hdcp_w->output);

	process_output(hdcp_w);
}

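/*
 * Remove one display from HDCP tracking: flip its CP property back to
 * DESIRED if it was ENABLED, remove it from mod_hdcp and drop the held
 * connector reference.
 */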
static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
				unsigned int link_index,
				struct amdgpu_dm_connector *aconnector)
{
	struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
	struct drm_connector_state *conn_state = aconnector->base.state;
	unsigned int conn_index = aconnector->base.index;

	guard(mutex)(&hdcp_w->mutex);

	/* The removal of the display will invoke an auth reset -> hdcp destroy,
	 * and we expect the Content Protection (CP) property to change back to
	 * DESIRED if it is currently ENABLED. The CP property change should
	 * occur before the element is removed from the linked list.
	 */
	if (conn_state && conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

		DRM_DEBUG_DRIVER("[HDCP_DM] display %d, CP 2 -> 1, type %u, DPMS %u\n",
				 aconnector->base.index, conn_state->hdcp_content_type,
				 aconnector->base.dpms);
	}

	mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output);
	if (hdcp_w->aconnector[conn_index]) {
		drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
		hdcp_w->aconnector[conn_index] = NULL;
	}
	process_output(hdcp_w);
}

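/*
 * Reset the whole HDCP connection for a link and mark every tracked display
 * on it as unencrypted, dropping the held connector references.
 */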
void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
{
	struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
	unsigned int conn_index;

	guard(mutex)(&hdcp_w->mutex);

	mod_hdcp_reset_connection(&hdcp_w->hdcp, &hdcp_w->output);

	cancel_delayed_work(&hdcp_w->property_validate_dwork);

	for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) {
		hdcp_w->encryption_status[conn_index] =
			MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
		if (hdcp_w->aconnector[conn_index]) {
			drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
			hdcp_w->aconnector[conn_index] = NULL;
		}
	}

	process_output(hdcp_w);
}

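/* Defer CP_IRQ handling for a link to the cpirq worker. */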
void hdcp_handle_cpirq(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
{
	struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];

	schedule_work(&hdcp_w->cpirq_work);
}

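/* Delayed-work handler: feed the pending CALLBACK event into mod_hdcp. */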
static void event_callback(struct work_struct *work)
{
	struct hdcp_workqueue *hdcp_work;

	hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue,
				 callback_dwork);

	guard(mutex)(&hdcp_work->mutex);

	cancel_delayed_work(&hdcp_work->callback_dwork);

	mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CALLBACK,
			       &hdcp_work->output);

	process_output(hdcp_work);
}

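/*
 * Worker that pushes the current per-connector encryption status out to DRM
 * as the Content Protection property (ENABLED or DESIRED), waiting for any
 * pending commit to finish first.
 */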
static void event_property_update(struct work_struct *work)
{
	struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue,
							property_update_work);
	struct amdgpu_dm_connector *aconnector = NULL;
	struct drm_device *dev;
	long ret;
	unsigned int conn_index;
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;

	for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) {
		aconnector = hdcp_work->aconnector[conn_index];

		if (!aconnector)
			continue;

		connector = &aconnector->base;

		/* check if display connected */
		if (connector->status != connector_status_connected)
			continue;

		conn_state = aconnector->base.state;

		if (!conn_state)
			continue;

		dev = connector->dev;

		if (!dev)
			continue;

		drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
		guard(mutex)(&hdcp_work->mutex);

		if (conn_state->commit) {
			ret = wait_for_completion_interruptible_timeout(&conn_state->commit->hw_done,
									10 * HZ);
			if (ret == 0) {
				DRM_ERROR("HDCP state unknown! Setting it to DESIRED\n");
				hdcp_work->encryption_status[conn_index] =
					MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
			}
		}
		if (hdcp_work->encryption_status[conn_index] !=
		    MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF) {
			if (conn_state->hdcp_content_type ==
			    DRM_MODE_HDCP_CONTENT_TYPE0 &&
			    hdcp_work->encryption_status[conn_index] <=
			    MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON) {
				DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_ENABLED\n");
				drm_hdcp_update_content_protection(connector,
								   DRM_MODE_CONTENT_PROTECTION_ENABLED);
			} else if (conn_state->hdcp_content_type ==
				   DRM_MODE_HDCP_CONTENT_TYPE1 &&
				   hdcp_work->encryption_status[conn_index] ==
				   MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON) {
				drm_hdcp_update_content_protection(connector,
								   DRM_MODE_CONTENT_PROTECTION_ENABLED);
			}
		} else {
			DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_DESIRED\n");
			drm_hdcp_update_content_protection(connector,
							   DRM_MODE_CONTENT_PROTECTION_DESIRED);
		}
		drm_modeset_unlock(&dev->mode_config.connection_mutex);
	}
}

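/*
 * Delayed worker that queries mod_hdcp for the current encryption status of
 * each tracked connector and schedules a property update when it changes.
 */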
static void event_property_validate(struct work_struct *work)
{
	struct hdcp_workqueue *hdcp_work =
		container_of(to_delayed_work(work), struct hdcp_workqueue, property_validate_dwork);
	struct mod_hdcp_display_query query;
	struct amdgpu_dm_connector *aconnector;
	unsigned int conn_index;

	guard(mutex)(&hdcp_work->mutex);

	for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX;
	     conn_index++) {
		aconnector = hdcp_work->aconnector[conn_index];

		if (!aconnector)
			continue;

		/* check if display connected */
		if (aconnector->base.status != connector_status_connected)
			continue;

		if (!aconnector->base.state)
			continue;

		query.encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
		mod_hdcp_query_display(&hdcp_work->hdcp, aconnector->base.index,
				       &query);

		DRM_DEBUG_DRIVER("[HDCP_DM] disp %d, connector->CP %u, (query, work): (%d, %d)\n",
				 aconnector->base.index,
				 aconnector->base.state->content_protection,
				 query.encryption_status,
				 hdcp_work->encryption_status[conn_index]);

		if (query.encryption_status !=
		    hdcp_work->encryption_status[conn_index]) {
			DRM_DEBUG_DRIVER("[HDCP_DM] encryption_status change from %x to %x\n",
					 hdcp_work->encryption_status[conn_index],
					 query.encryption_status);

			hdcp_work->encryption_status[conn_index] =
				query.encryption_status;

			DRM_DEBUG_DRIVER("[HDCP_DM] trigger property_update_work\n");

			schedule_work(&hdcp_work->property_update_work);
		}
	}
}

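/* Delayed-work handler: report a WATCHDOG_TIMEOUT event to mod_hdcp. */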
static void event_watchdog_timer(struct work_struct *work)
{
	struct hdcp_workqueue *hdcp_work;

	hdcp_work = container_of(to_delayed_work(work),
				 struct hdcp_workqueue,
				 watchdog_timer_dwork);

	guard(mutex)(&hdcp_work->mutex);

	cancel_delayed_work(&hdcp_work->watchdog_timer_dwork);

	mod_hdcp_process_event(&hdcp_work->hdcp,
			       MOD_HDCP_EVENT_WATCHDOG_TIMEOUT,
			       &hdcp_work->output);

	process_output(hdcp_work);
}

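/* Work handler: forward a CP_IRQ event to mod_hdcp. */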
static void event_cpirq(struct work_struct *work)
{
	struct hdcp_workqueue *hdcp_work;

	hdcp_work = container_of(work, struct hdcp_workqueue, cpirq_work);

	guard(mutex)(&hdcp_work->mutex);

	mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CPIRQ, &hdcp_work->output);

	process_output(hdcp_work);
}

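/*
 * Tear down the HDCP workqueue: flush the per-link delayed work, remove the
 * SRM sysfs file and free the SRM buffers and the workqueue array itself.
 */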
void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work)
{
	int i = 0;

	for (i = 0; i < hdcp_work->max_link; i++) {
		cancel_delayed_work_sync(&hdcp_work[i].callback_dwork);
		cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork);
		cancel_delayed_work_sync(&hdcp_work[i].property_validate_dwork);
	}

	sysfs_remove_bin_file(kobj, &hdcp_work[0].attr);
	kfree(hdcp_work->srm);
	kfree(hdcp_work->srm_temp);
	kfree(hdcp_work);
}

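/*
 * Ask the PSP DTM TA to enable Alternate Scrambler Seed Reset (ASSR) on the
 * DIG backend driving this DP link.
 */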
static bool enable_assr(void *handle, struct dc_link *link)
{
	struct hdcp_workqueue *hdcp_work = handle;
	struct mod_hdcp hdcp = hdcp_work->hdcp;
	struct psp_context *psp = hdcp.config.psp.handle;
	struct ta_dtm_shared_memory *dtm_cmd;

	if (!psp->dtm_context.context.initialized) {
		DRM_INFO("Failed to enable ASSR, DTM TA is not initialized.");
		return false;
	}

	dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf;

	guard(mutex)(&psp->dtm_context.mutex);
	memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));

	dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_ASSR_ENABLE;
	dtm_cmd->dtm_in_message.topology_assr_enable.display_topology_dig_be_index =
		link->link_enc_hw_inst;
	dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;

	psp_dtm_invoke(psp, dtm_cmd->cmd_id);

	if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
		DRM_INFO("Failed to enable ASSR");
		return false;
	}

	return true;
}

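/*
 * cp_psp stream-config hook: translate the DC stream/link configuration into
 * mod_hdcp display and link descriptors and register (or, on DPMS off,
 * remove) the display with mod_hdcp.
 */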
static void update_config(void *handle, struct cp_psp_stream_config *config)
{
	struct hdcp_workqueue *hdcp_work = handle;
	struct amdgpu_dm_connector *aconnector = config->dm_stream_ctx;
	int link_index = aconnector->dc_link->link_index;
	unsigned int conn_index = aconnector->base.index;
	struct mod_hdcp_display *display = &hdcp_work[link_index].display;
	struct mod_hdcp_link *link = &hdcp_work[link_index].link;
	struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
	struct dc_sink *sink = NULL;
	bool link_is_hdcp14 = false;
	const struct dc *dc = aconnector->dc_link->dc;

	if (config->dpms_off) {
		hdcp_remove_display(hdcp_work, link_index, aconnector);
		return;
	}

	memset(display, 0, sizeof(*display));
	memset(link, 0, sizeof(*link));

	display->index = aconnector->base.index;
	display->state = MOD_HDCP_DISPLAY_ACTIVE;

	if (aconnector->dc_sink)
		sink = aconnector->dc_sink;
	else if (aconnector->dc_em_sink)
		sink = aconnector->dc_em_sink;

	if (sink)
		link->mode = mod_hdcp_signal_type_to_operation_mode(sink->sink_signal);

	display->controller = CONTROLLER_ID_D0 + config->otg_inst;
	display->dig_fe = config->dig_fe;
	link->dig_be = config->dig_be;
	link->ddc_line = aconnector->dc_link->ddc_hw_inst + 1;
	display->stream_enc_idx = config->stream_enc_idx;
	link->link_enc_idx = config->link_enc_idx;
	link->dio_output_id = config->dio_output_idx;
	link->phy_idx = config->phy_idx;

	if (sink)
		link_is_hdcp14 = dc_link_is_hdcp14(aconnector->dc_link, sink->sink_signal);
	link->hdcp_supported_informational = link_is_hdcp14;
	link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw;
	link->dp.assr_enabled = config->assr_enabled;
	link->dp.mst_enabled = config->mst_enabled;
	link->dp.dp2_enabled = config->dp2_enabled;
	link->dp.usb4_enabled = config->usb4_enabled;
	display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION;
	link->adjust.auth_delay = 2;
	link->adjust.retry_limit = MAX_NUM_OF_ATTEMPTS;
	link->adjust.hdcp1.disable = 0;
	link->adjust.hdcp2.use_fw_locality_check = (dc->caps.fused_io_supported || dc->debug.hdcp_lc_force_fw_enable);
	link->adjust.hdcp2.use_sw_locality_fallback = dc->debug.hdcp_lc_enable_sw_fallback;
	hdcp_w->encryption_status[display->index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;

	DRM_DEBUG_DRIVER("[HDCP_DM] display %d, CP %d, type %d\n", aconnector->base.index,
			 (!!aconnector->base.state) ?
			 aconnector->base.state->content_protection : -1,
			 (!!aconnector->base.state) ?
			 aconnector->base.state->hdcp_content_type : -1);

	guard(mutex)(&hdcp_w->mutex);

	mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output);
	drm_connector_get(&aconnector->base);
	if (hdcp_w->aconnector[conn_index])
		drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
	hdcp_w->aconnector[conn_index] = aconnector;
	process_output(hdcp_w);
}

/**
 * DOC: Add sysfs interface for set/get srm
 *
 * NOTE: From usermode's perspective you only need to call write *ONCE*; the kernel
 * will automatically call it once or twice depending on the size.
 *
 * call: "cat file > /sys/class/drm/card0/device/hdcp_srm" from usermode no matter what the size is
 *
 * The kernel can only send PAGE_SIZE at once, and since MAX_SRM_FILE(5120) > PAGE_SIZE(4096),
 * srm_data_write can be called multiple times.
 *
 * The sysfs interface doesn't tell us the size we will get, so we send partial SRMs to PSP and on
 * the last call we send the full SRM. PSP will fail on every call before the last.
 *
 * This means we don't know if the SRM is good until the last call, and because of this
 * limitation we cannot return errors early as that would stop the kernel from writing to sysfs.
 *
 * Example 1:
 * Good SRM size = 5096
 * first call to write 4096 -> PSP fails
 * Second call to write 1000 -> PSP passes -> SRM is set
 *
 * Example 2:
 * Bad SRM size = 4096
 * first call to write 4096 -> PSP fails (This is the same as above, but we don't know if this
 * is the last call)
 *
 * Solution?:
 * 1: Parse the SRM? -> It is signed, so we don't know the EOF
 * 2: We can have another sysfs file that passes the size before calling set -> a simpler solution
 * is below
 *
 * Easy Solution:
 * Always call get after set to verify whether set was successful.
 * +----------------------+
 * | Why it works:        |
 * +----------------------+
 * PSP will only update its SRM if the currently loaded one is older than the one we are
 * trying to load. Always do set first, then get.
 * -if we try to "1. SET" an older version, PSP will reject it and we can "2. GET" the newer
 * version and save it
 *
 * -if we try to "1. SET" a newer version, PSP will accept it and we can "2. GET" the
 * same (newer) version back and save it
 *
 * -if we try to "1. SET" a newer version and PSP rejects it, that means the format is
 * incorrect/corrupted and we should correct our SRM by getting it from PSP
 */
static ssize_t srm_data_write(struct file *filp, struct kobject *kobj,
			      const struct bin_attribute *bin_attr, char *buffer,
			      loff_t pos, size_t count)
{
	struct hdcp_workqueue *work;
	u32 srm_version = 0;

	work = container_of(bin_attr, struct hdcp_workqueue, attr);
	link_lock(work, true);

	memcpy(work->srm_temp + pos, buffer, count);

	if (!psp_set_srm(work->hdcp.config.psp.handle, work->srm_temp, pos + count, &srm_version)) {
		DRM_DEBUG_DRIVER("HDCP SRM SET version 0x%X", srm_version);
		memcpy(work->srm, work->srm_temp, pos + count);
		work->srm_size = pos + count;
		work->srm_version = srm_version;
	}

	link_lock(work, false);

	return count;
}

static ssize_t srm_data_read(struct file *filp, struct kobject *kobj,
			     const struct bin_attribute *bin_attr, char *buffer,
			     loff_t pos, size_t count)
{
	struct hdcp_workqueue *work;
	u8 *srm = NULL;
	u32 srm_version;
	u32 srm_size;
	size_t ret = count;

	work = container_of(bin_attr, struct hdcp_workqueue, attr);

	link_lock(work, true);

	srm = psp_get_srm(work->hdcp.config.psp.handle, &srm_version, &srm_size);

	if (!srm) {
		ret = -EINVAL;
		goto ret;
	}

	if (pos >= srm_size) {
		/* nothing left to read */
		ret = 0;
		goto ret;
	}

	if (srm_size - pos < count) {
		memcpy(buffer, srm + pos, srm_size - pos);
		ret = srm_size - pos;
		goto ret;
	}

	memcpy(buffer, srm + pos, count);

ret:
	link_lock(work, false);
	return ret;
}

/* From the HDCP spec (5. Renewability) the SRM needs to be stored in non-volatile memory.
 *
 * For example,
 * if Application "A" sets the SRM (ver 2) and we reboot/suspend, and later Application "B"
 * needs to use HDCP, the version in PSP should be SRM(ver 2). So the SRM should be persistent
 * across boots/reboots/suspend/resume/shutdown.
 *
 * Currently, when the system goes down (suspend/shutdown) the SRM is cleared from PSP. For HDCP
 * we need to make the SRM persistent.
 *
 * -PSP owns the checking of the SRM but doesn't have the ability to store it in non-volatile memory.
 * -The kernel cannot write to the filesystem.
 * -So we need usermode to do this for us, which is why an interface for usermode is needed.
 *
 * Usermode can read/write to/from PSP using the sysfs interface.
 * For example:
 * to save the SRM from PSP to storage: cat /sys/class/drm/card0/device/hdcp_srm > srmfile
 * to load from storage to PSP: cat srmfile > /sys/class/drm/card0/device/hdcp_srm
 */
static const struct bin_attribute data_attr = {
	.attr = {.name = "hdcp_srm", .mode = 0664},
	.size = PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, /* Limit SRM size */
	.write = srm_data_write,
	.read = srm_data_read,
};

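/*
 * Allocate and initialize one hdcp_workqueue entry per link: SRM buffers,
 * per-link work items, the mod_hdcp PSP/DDC configuration, the cp_psp
 * callbacks and the hdcp_srm sysfs file.
 */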
struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev,
					     struct cp_psp *cp_psp, struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	struct hdcp_workqueue *hdcp_work;
	int i = 0;

	hdcp_work = kcalloc(max_caps, sizeof(*hdcp_work), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(hdcp_work))
		return NULL;

	hdcp_work->srm = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE,
				 sizeof(*hdcp_work->srm), GFP_KERNEL);

	if (!hdcp_work->srm)
		goto fail_alloc_context;

	hdcp_work->srm_temp = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE,
				      sizeof(*hdcp_work->srm_temp), GFP_KERNEL);

	if (!hdcp_work->srm_temp)
		goto fail_alloc_context;

	hdcp_work->max_link = max_caps;

	for (i = 0; i < max_caps; i++) {
		mutex_init(&hdcp_work[i].mutex);

		INIT_WORK(&hdcp_work[i].cpirq_work, event_cpirq);
		INIT_WORK(&hdcp_work[i].property_update_work, event_property_update);
		INIT_DELAYED_WORK(&hdcp_work[i].callback_dwork, event_callback);
		INIT_DELAYED_WORK(&hdcp_work[i].watchdog_timer_dwork, event_watchdog_timer);
		INIT_DELAYED_WORK(&hdcp_work[i].property_validate_dwork, event_property_validate);

		struct mod_hdcp_config *config = &hdcp_work[i].hdcp.config;
		struct mod_hdcp_ddc_funcs *ddc_funcs = &config->ddc.funcs;

		config->psp.handle = &adev->psp;
		if (dc->ctx->dce_version == DCN_VERSION_3_1 ||
		    dc->ctx->dce_version == DCN_VERSION_3_14 ||
		    dc->ctx->dce_version == DCN_VERSION_3_15 ||
		    dc->ctx->dce_version == DCN_VERSION_3_16 ||
		    dc->ctx->dce_version == DCN_VERSION_3_2 ||
		    dc->ctx->dce_version == DCN_VERSION_3_21 ||
		    dc->ctx->dce_version == DCN_VERSION_3_5 ||
		    dc->ctx->dce_version == DCN_VERSION_3_51 ||
		    dc->ctx->dce_version == DCN_VERSION_3_6 ||
		    dc->ctx->dce_version == DCN_VERSION_4_01)
			config->psp.caps.dtm_v3_supported = 1;

		config->ddc.handle = dc_get_link_at_index(dc, i);

		ddc_funcs->write_i2c = lp_write_i2c;
		ddc_funcs->read_i2c = lp_read_i2c;
		ddc_funcs->write_dpcd = lp_write_dpcd;
		ddc_funcs->read_dpcd = lp_read_dpcd;
		ddc_funcs->atomic_write_poll_read_i2c = lp_atomic_write_poll_read_i2c;
		ddc_funcs->atomic_write_poll_read_aux = lp_atomic_write_poll_read_aux;

		memset(hdcp_work[i].aconnector, 0,
		       sizeof(struct amdgpu_dm_connector *) *
		       AMDGPU_DM_MAX_DISPLAY_INDEX);
		memset(hdcp_work[i].encryption_status, 0,
		       sizeof(enum mod_hdcp_encryption_status) *
		       AMDGPU_DM_MAX_DISPLAY_INDEX);
	}

	cp_psp->funcs.update_stream_config = update_config;
	cp_psp->funcs.enable_assr = enable_assr;
	cp_psp->handle = hdcp_work;

	/* File created at /sys/class/drm/card0/device/hdcp_srm */
	hdcp_work[0].attr = data_attr;
	sysfs_bin_attr_init(&hdcp_work[0].attr);

	if (sysfs_create_bin_file(&adev->dev->kobj, &hdcp_work[0].attr))
		DRM_WARN("Failed to create device file hdcp_srm");

	return hdcp_work;

fail_alloc_context:
	kfree(hdcp_work->srm);
	kfree(hdcp_work->srm_temp);
	kfree(hdcp_work);

	return NULL;
}