// SPDX-License-Identifier: MIT
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <drm/drm_crtc.h>
#include <drm/drm_vblank.h>

#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "dc.h"
#include "amdgpu_securedisplay.h"
#include "amdgpu_dm_psr.h"

static const char *const pipe_crc_sources[] = {
	"none",
	"crtc",
	"crtc dither",
	"dprx",
	"dprx dither",
	"auto",
};

static enum amdgpu_dm_pipe_crc_source dm_parse_crc_source(const char *source)
{
	if (!source || !strcmp(source, "none"))
		return AMDGPU_DM_PIPE_CRC_SOURCE_NONE;
	if (!strcmp(source, "auto") || !strcmp(source, "crtc"))
		return AMDGPU_DM_PIPE_CRC_SOURCE_CRTC;
	if (!strcmp(source, "dprx"))
		return AMDGPU_DM_PIPE_CRC_SOURCE_DPRX;
	if (!strcmp(source, "crtc dither"))
		return AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER;
	if (!strcmp(source, "dprx dither"))
		return AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER;

	return AMDGPU_DM_PIPE_CRC_SOURCE_INVALID;
}
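
/*
 * Example (generic DRM CRC debugfs ABI, not part of this file): userspace
 * selects one of the source names parsed above by writing it to the CRC
 * control node of a CRTC, then reads back per-frame values, e.g.
 *
 *   # echo crtc > /sys/kernel/debug/dri/0/crtc-0/crc/control
 *   # cat /sys/kernel/debug/dri/0/crtc-0/crc/data
 *
 * Each line read from crc/data pairs a frame number with the CRC values
 * reported by amdgpu_dm_crtc_handle_crc_irq() below. Writing "none" stops
 * capture. The paths assume DRM minor 0 and CRTC 0.
 */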

static bool dm_is_crc_source_crtc(enum amdgpu_dm_pipe_crc_source src)
{
	return (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC) ||
	       (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER);
}

static bool dm_is_crc_source_dprx(enum amdgpu_dm_pipe_crc_source src)
{
	return (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX) ||
	       (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER);
}

static bool dm_need_crc_dither(enum amdgpu_dm_pipe_crc_source src)
{
	return (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER) ||
	       (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER) ||
	       (src == AMDGPU_DM_PIPE_CRC_SOURCE_NONE);
}

const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc,
						  size_t *count)
{
	*count = ARRAY_SIZE(pipe_crc_sources);
	return pipe_crc_sources;
}

#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
static void update_phy_id_mapping(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev_to_drm(adev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	struct amdgpu_dm_connector *sort_connector[AMDGPU_DM_MAX_CRTC] = {NULL};
	struct drm_connector_list_iter iter;
	uint8_t idx = 0, idx_2 = 0, connector_cnt = 0;

	dm->secure_display_ctx.phy_mapping_updated = false;

	mutex_lock(&ddev->mode_config.mutex);
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {

		if (connector->status != connector_status_connected)
			continue;

		if (idx >= AMDGPU_DM_MAX_CRTC) {
			DRM_WARN("%s connected connectors exceed max crtc\n", __func__);
			mutex_unlock(&ddev->mode_config.mutex);
			return;
		}

		aconnector = to_amdgpu_dm_connector(connector);

		sort_connector[idx] = aconnector;
		idx++;
		connector_cnt++;
	}
	drm_connector_list_iter_end(&iter);

	/* sort connectors by link_enc_hw_inst first */
	for (idx = connector_cnt; idx > 1; idx--) {
		for (idx_2 = 0; idx_2 < (idx - 1); idx_2++) {
			if (sort_connector[idx_2]->dc_link->link_enc_hw_inst >
			    sort_connector[idx_2 + 1]->dc_link->link_enc_hw_inst)
				swap(sort_connector[idx_2], sort_connector[idx_2 + 1]);
		}
	}

	/*
	 * Sort MST connectors by RAD. MST connectors with the same
	 * enc_hw_inst have already been grouped together by the sort above.
	 */
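	/*
	 * Worked illustration (assumed numbers): a RAD byte packs two 4-bit
	 * hop port numbers, upper nibble first. A connector behind branch
	 * ports 1 -> 3 has lct == 3 and rad[0] == 0x13, so the comparison
	 * below extracts port 1 (shift 4) for k == 0 and port 3 (shift 0)
	 * for k == 1.
	 */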
	for (idx = 0; idx < connector_cnt; /* do nothing */) {
		if (sort_connector[idx]->mst_root) {
			uint8_t i, j, k;
			uint8_t mst_con_cnt = 1;

			for (idx_2 = (idx + 1); idx_2 < connector_cnt; idx_2++) {
				if (sort_connector[idx_2]->mst_root == sort_connector[idx]->mst_root)
					mst_con_cnt++;
				else
					break;
			}

			for (i = mst_con_cnt; i > 1; i--) {
				for (j = idx; j < (idx + i - 1); j++) {
					int mstb_lct = sort_connector[j]->mst_output_port->parent->lct;
					int next_mstb_lct = sort_connector[j + 1]->mst_output_port->parent->lct;
					u8 *rad;
					u8 *next_rad;
					bool need_swap = false;

					/* Sort by mst tree depth first. Compare RAD if the depth is the same. */
					if (mstb_lct > next_mstb_lct) {
						need_swap = true;
					} else if (mstb_lct == next_mstb_lct) {
						if (mstb_lct == 1) {
							if (sort_connector[j]->mst_output_port->port_num > sort_connector[j + 1]->mst_output_port->port_num)
								need_swap = true;
						} else if (mstb_lct > 1) {
							rad = sort_connector[j]->mst_output_port->parent->rad;
							next_rad = sort_connector[j + 1]->mst_output_port->parent->rad;

							for (k = 0; k < mstb_lct - 1; k++) {
								int shift = (k % 2) ? 0 : 4;
								int port_num = (rad[k / 2] >> shift) & 0xf;
								int next_port_num = (next_rad[k / 2] >> shift) & 0xf;

								if (port_num > next_port_num) {
									need_swap = true;
									break;
								}
							}
						} else {
							DRM_ERROR("MST LCT should not be less than 1");
							mutex_unlock(&ddev->mode_config.mutex);
							return;
						}
					}

					if (need_swap)
						swap(sort_connector[j], sort_connector[j + 1]);
				}
			}

			idx += mst_con_cnt;
		} else {
			idx++;
		}
	}

	/* Sorting complete. Assign the relevant results to dm->secure_display_ctx.phy_id_mapping[]. */
	memset(dm->secure_display_ctx.phy_id_mapping, 0, sizeof(dm->secure_display_ctx.phy_id_mapping));
	for (idx = 0; idx < connector_cnt; idx++) {
		aconnector = sort_connector[idx];

		dm->secure_display_ctx.phy_id_mapping[idx].assigned = true;
		dm->secure_display_ctx.phy_id_mapping[idx].is_mst = false;
		dm->secure_display_ctx.phy_id_mapping[idx].enc_hw_inst = aconnector->dc_link->link_enc_hw_inst;

		if (sort_connector[idx]->mst_root) {
			dm->secure_display_ctx.phy_id_mapping[idx].is_mst = true;
			dm->secure_display_ctx.phy_id_mapping[idx].lct = aconnector->mst_output_port->parent->lct;
			dm->secure_display_ctx.phy_id_mapping[idx].port_num = aconnector->mst_output_port->port_num;
			memcpy(dm->secure_display_ctx.phy_id_mapping[idx].rad,
			       aconnector->mst_output_port->parent->rad, sizeof(aconnector->mst_output_port->parent->rad));
		}
	}
	mutex_unlock(&ddev->mode_config.mutex);

	dm->secure_display_ctx.phy_id_mapping_cnt = connector_cnt;
	dm->secure_display_ctx.phy_mapping_updated = true;
}
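
/*
 * Example outcome (assumed topology): with three connected non-MST
 * connectors on link encoder instances {2, 0, 1}, the sort above stores
 * them in instance order, so phy_id_mapping[0..2].enc_hw_inst == {0, 1, 2}
 * and the PHY ID reported by get_phy_id() below is simply the table index.
 */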

static bool get_phy_id(struct amdgpu_display_manager *dm,
		       struct amdgpu_dm_connector *aconnector, uint8_t *phy_id)
{
	int idx, idx_2;
	bool found = false;

	/*
	 * Assume secure display starts after all connectors have been probed.
	 * The connection configuration is static as well.
	 */
	if (!dm->secure_display_ctx.phy_mapping_updated) {
		DRM_WARN("%s The phy id table should be updated before reading its value", __func__);
		return false;
	}

	for (idx = 0; idx < dm->secure_display_ctx.phy_id_mapping_cnt; idx++) {
		if (!dm->secure_display_ctx.phy_id_mapping[idx].assigned) {
			DRM_ERROR("phy_id_mapping[%d] should be assigned", idx);
			return false;
		}

		if (aconnector->dc_link->link_enc_hw_inst ==
		    dm->secure_display_ctx.phy_id_mapping[idx].enc_hw_inst) {
			if (!dm->secure_display_ctx.phy_id_mapping[idx].is_mst) {
				found = true;
				goto out;
			} else {
				/* Could be caused by wrongly passing the mst root connector */
				if (!aconnector->mst_output_port) {
					DRM_ERROR("%s Checking mst case but connector has no port assigned", __func__);
					return false;
				}

				if (aconnector->mst_root &&
				    aconnector->mst_root->mst_mgr.mst_primary == NULL) {
					DRM_WARN("%s passed in a stale mst connector", __func__);
				}

				if (aconnector->mst_output_port->parent->lct == dm->secure_display_ctx.phy_id_mapping[idx].lct &&
				    aconnector->mst_output_port->port_num == dm->secure_display_ctx.phy_id_mapping[idx].port_num) {
					if (aconnector->mst_output_port->parent->lct == 1) {
						found = true;
						goto out;
					} else if (aconnector->mst_output_port->parent->lct > 1) {
						/* Check RAD */
						for (idx_2 = 0; idx_2 < aconnector->mst_output_port->parent->lct - 1; idx_2++) {
							int shift = (idx_2 % 2) ? 0 : 4;
							int port_num = (aconnector->mst_output_port->parent->rad[idx_2 / 2] >> shift) & 0xf;
							int port_num2 = (dm->secure_display_ctx.phy_id_mapping[idx].rad[idx_2 / 2] >> shift) & 0xf;

							if (port_num != port_num2)
								break;
						}

						if (idx_2 == aconnector->mst_output_port->parent->lct - 1) {
							found = true;
							goto out;
						}
					} else {
						DRM_ERROR("LCT should be >= 1");
						return false;
					}
				}
			}
		}
	}

out:
	if (found) {
		DRM_DEBUG_DRIVER("Associated secure display PHY ID is %d", idx);
		*phy_id = idx;
	} else {
		DRM_WARN("Can't find associated phy ID");
		return false;
	}

	return true;
}

static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc, struct dc_stream_state *stream)
{
	struct drm_device *drm_dev = crtc->dev;
	struct amdgpu_display_manager *dm = &drm_to_adev(drm_dev)->dm;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_dm_connector *aconnector;
	bool was_activated;
	uint8_t phy_id;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&drm_dev->event_lock, flags);
	was_activated = acrtc->dm_irq_params.crc_window_activated;
	for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
		acrtc->dm_irq_params.window_param[i].x_start = 0;
		acrtc->dm_irq_params.window_param[i].y_start = 0;
		acrtc->dm_irq_params.window_param[i].x_end = 0;
		acrtc->dm_irq_params.window_param[i].y_end = 0;
		acrtc->dm_irq_params.window_param[i].enable = false;
		acrtc->dm_irq_params.window_param[i].update_win = false;
		acrtc->dm_irq_params.window_param[i].skip_frame_cnt = 0;
	}
	acrtc->dm_irq_params.crc_window_activated = false;
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);

	/* Disable secure_display if it was enabled */
	if (was_activated && dm->secure_display_ctx.op_mode == LEGACY_MODE) {
		/* stop ROI update on this crtc */
		flush_work(&dm->secure_display_ctx.crtc_ctx[crtc->index].notify_ta_work);
		flush_work(&dm->secure_display_ctx.crtc_ctx[crtc->index].forward_roi_work);
		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

		if (aconnector && get_phy_id(dm, aconnector, &phy_id)) {
			if (dm->secure_display_ctx.support_mul_roi)
				dc_stream_forward_multiple_crc_window(stream, NULL, phy_id, true);
			else
				dc_stream_forward_crc_window(stream, NULL, phy_id, true);
		} else {
			DRM_DEBUG_DRIVER("%s Can't find matching phy id", __func__);
		}
	}
}

static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
{
	struct secure_display_crtc_context *crtc_ctx;
	struct psp_context *psp;
	struct ta_securedisplay_cmd *securedisplay_cmd;
	struct drm_crtc *crtc;
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector;
	uint8_t phy_inst;
	struct amdgpu_display_manager *dm;
	struct crc_data crc_cpy[MAX_CRC_WINDOW_NUM];
	unsigned long flags;
	uint8_t roi_idx = 0;
	int ret;
	int i;

	crtc_ctx = container_of(work, struct secure_display_crtc_context, notify_ta_work);
	crtc = crtc_ctx->crtc;

	if (!crtc)
		return;

	psp = &drm_to_adev(crtc->dev)->psp;

	if (!psp->securedisplay_context.context.initialized) {
		DRM_DEBUG_DRIVER("Secure Display fails to notify PSP TA\n");
		return;
	}

	dm = &drm_to_adev(crtc->dev)->dm;
	stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream;
	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
	if (!aconnector)
		return;

	mutex_lock(&crtc->dev->mode_config.mutex);
	if (!get_phy_id(dm, aconnector, &phy_inst)) {
		DRM_WARN("%s Can't find mapping phy id!", __func__);
		mutex_unlock(&crtc->dev->mode_config.mutex);
		return;
	}
	mutex_unlock(&crtc->dev->mode_config.mutex);

	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	memcpy(crc_cpy, crtc_ctx->crc_info.crc, sizeof(struct crc_data) * MAX_CRC_WINDOW_NUM);
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

	/* need lock for multiple crtcs to use the command buffer */
	mutex_lock(&psp->securedisplay_context.mutex);
	/* The PSP TA is expected to finish data transmission over I2C within the
	 * current frame, even when up to 4 crtcs request to send in this frame.
	 */
	if (dm->secure_display_ctx.support_mul_roi) {
		psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
					       TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2);

		securedisplay_cmd->securedisplay_in_message.send_roi_crc_v2.phy_id = phy_inst;

		for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
			if (crc_cpy[i].crc_ready)
				roi_idx |= 1 << i;
		}
		securedisplay_cmd->securedisplay_in_message.send_roi_crc_v2.roi_idx = roi_idx;

		ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2);
	} else {
		psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
					       TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);

		securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = phy_inst;

		ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
	}

	if (!ret) {
		if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS)
			psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
	}

	mutex_unlock(&psp->securedisplay_context.mutex);
}

static void
amdgpu_dm_forward_crc_window(struct work_struct *work)
{
	struct secure_display_crtc_context *crtc_ctx;
	struct amdgpu_display_manager *dm;
	struct drm_crtc *crtc;
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector;
	struct crc_window roi_cpy[MAX_CRC_WINDOW_NUM];
	unsigned long flags;
	uint8_t phy_id;

	crtc_ctx = container_of(work, struct secure_display_crtc_context, forward_roi_work);
	crtc = crtc_ctx->crtc;

	if (!crtc)
		return;

	dm = &drm_to_adev(crtc->dev)->dm;
	stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream;
	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

	if (!aconnector)
		return;

	mutex_lock(&crtc->dev->mode_config.mutex);
	if (!get_phy_id(dm, aconnector, &phy_id)) {
		DRM_WARN("%s Can't find mapping phy id!", __func__);
		mutex_unlock(&crtc->dev->mode_config.mutex);
		return;
	}
	mutex_unlock(&crtc->dev->mode_config.mutex);

	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	memcpy(roi_cpy, crtc_ctx->roi, sizeof(struct crc_window) * MAX_CRC_WINDOW_NUM);
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

	mutex_lock(&dm->dc_lock);
	if (dm->secure_display_ctx.support_mul_roi)
		dc_stream_forward_multiple_crc_window(stream, roi_cpy,
						      phy_id, false);
	else
		dc_stream_forward_crc_window(stream, &roi_cpy[0].rect,
					     phy_id, false);
	mutex_unlock(&dm->dc_lock);
}

bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc)
{
	struct drm_device *drm_dev = crtc->dev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	bool ret = false;

	spin_lock_irq(&drm_dev->event_lock);
	ret = acrtc->dm_irq_params.crc_window_activated;
	spin_unlock_irq(&drm_dev->event_lock);

	return ret;
}
#endif

int
amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
				 size_t *values_cnt)
{
	enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name);

	if (source < 0) {
		DRM_DEBUG_DRIVER("Unknown CRC source %s for CRTC%d\n",
				 src_name, crtc->index);
		return -EINVAL;
	}

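	/* Three values are reported per frame: one CRC per color channel. */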
	*values_cnt = 3;
	return 0;
}

int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
					struct dm_crtc_state *dm_crtc_state,
					enum amdgpu_dm_pipe_crc_source source)
{
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	struct dc_stream_state *stream_state = dm_crtc_state->stream;
	bool enable = amdgpu_dm_is_valid_crc_source(source);
	int ret = 0;

	/* Configuration will be deferred to stream enable. */
	if (!stream_state)
		return -EINVAL;

	mutex_lock(&adev->dm.dc_lock);

	/* For PSR1, check that the panel has exited PSR */
	if (stream_state->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1)
		amdgpu_dm_psr_wait_disable(stream_state);

	/* Enable or disable CRTC CRC generation */
	if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) {
		if (!dc_stream_configure_crc(stream_state->ctx->dc,
					     stream_state, NULL, enable, enable, 0, true)) {
			ret = -EINVAL;
			goto unlock;
		}
	}

	/* Configure dithering */
	if (!dm_need_crc_dither(source)) {
		dc_stream_set_dither_option(stream_state, DITHER_OPTION_TRUN8);
		dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state,
					    DYN_EXPANSION_DISABLE);
	} else {
		dc_stream_set_dither_option(stream_state,
					    DITHER_OPTION_DEFAULT);
		dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state,
					    DYN_EXPANSION_AUTO);
	}

unlock:
	mutex_unlock(&adev->dm.dc_lock);

	return ret;
}

int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
{
	enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name);
	enum amdgpu_dm_pipe_crc_source cur_crc_src;
	struct drm_crtc_commit *commit;
	struct dm_crtc_state *crtc_state;
	struct drm_device *drm_dev = crtc->dev;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	struct amdgpu_device *adev = drm_to_adev(drm_dev);
	struct amdgpu_display_manager *dm = &adev->dm;
#endif
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct drm_dp_aux *aux = NULL;
	bool enable = false;
	bool enabled = false;
	int ret = 0;

	if (source < 0) {
		DRM_DEBUG_DRIVER("Unknown CRC source %s for CRTC%d\n",
				 src_name, crtc->index);
		return -EINVAL;
	}

	ret = drm_modeset_lock(&crtc->mutex, NULL);
	if (ret)
		return ret;

	spin_lock(&crtc->commit_lock);
	commit = list_first_entry_or_null(&crtc->commit_list,
					  struct drm_crtc_commit, commit_entry);
	if (commit)
		drm_crtc_commit_get(commit);
	spin_unlock(&crtc->commit_lock);

	if (commit) {
		/*
		 * Need to wait for all outstanding programming to complete
		 * in commit tail since it can modify CRC related fields and
		 * hardware state. Since we're holding the CRTC lock we're
		 * guaranteed that no other commit work can be queued off
		 * before we modify the state below.
		 *
		 * wait_for_completion_interruptible_timeout() returns the
		 * remaining jiffies on success, so only treat timeout (0)
		 * and interruption (< 0) as errors.
		 */
		ret = wait_for_completion_interruptible_timeout(
			&commit->hw_done, 10 * HZ);
		if (ret == 0)
			ret = -ETIMEDOUT;
		if (ret < 0)
			goto cleanup;
		ret = 0;
	}

	enable = amdgpu_dm_is_valid_crc_source(source);
	crtc_state = to_dm_crtc_state(crtc->state);
	spin_lock_irq(&drm_dev->event_lock);
	cur_crc_src = acrtc->dm_irq_params.crc_src;
	spin_unlock_irq(&drm_dev->event_lock);

	/*
	 * USER REQ SRC | CURRENT SRC  | BEHAVIOR
	 * -----------------------------------------------------------------
	 * None         | None         | Do nothing
	 * None         | CRTC         | Disable CRTC CRC, set default to dither
	 * None         | DPRX         | Disable DPRX CRC, need 'aux', set default to dither
	 * None         | CRTC DITHER  | Disable CRTC CRC
	 * None         | DPRX DITHER  | Disable DPRX CRC, need 'aux'
	 * CRTC         | XXXX         | Enable CRTC CRC, no dither
	 * DPRX         | XXXX         | Enable DPRX CRC, need 'aux', no dither
	 * CRTC DITHER  | XXXX         | Enable CRTC CRC, set dither
	 * DPRX DITHER  | XXXX         | Enable DPRX CRC, need 'aux', set dither
	 */
	if (dm_is_crc_source_dprx(source) ||
	    (source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE &&
	     dm_is_crc_source_dprx(cur_crc_src))) {
		struct amdgpu_dm_connector *aconn = NULL;
		struct drm_connector *connector;
		struct drm_connector_list_iter conn_iter;

		drm_connector_list_iter_begin(crtc->dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			if (!connector->state || connector->state->crtc != crtc)
				continue;

			if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
				continue;

			aconn = to_amdgpu_dm_connector(connector);
			break;
		}
		drm_connector_list_iter_end(&conn_iter);

		if (!aconn) {
			DRM_DEBUG_DRIVER("No amd connector matching CRTC-%d\n", crtc->index);
			ret = -EINVAL;
			goto cleanup;
		}

		aux = (aconn->mst_output_port) ? &aconn->mst_output_port->aux : &aconn->dm_dp_aux.aux;

		if (!aux) {
			DRM_DEBUG_DRIVER("No dp aux for amd connector\n");
			ret = -EINVAL;
			goto cleanup;
		}

		if ((aconn->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
		    (aconn->base.connector_type != DRM_MODE_CONNECTOR_eDP)) {
			DRM_DEBUG_DRIVER("No DP connector available for CRC source\n");
			ret = -EINVAL;
			goto cleanup;
		}
	}

	/*
	 * Reading the CRC requires the vblank interrupt handler to be
	 * enabled. Keep a reference until CRC capture stops.
	 */
	enabled = amdgpu_dm_is_valid_crc_source(cur_crc_src);
	if (!enabled && enable) {
		ret = drm_crtc_vblank_get(crtc);
		if (ret)
			goto cleanup;
	}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	/* Reset secure_display when we change crc source from debugfs */
	amdgpu_dm_set_crc_window_default(crtc, crtc_state->stream);
#endif

	if (amdgpu_dm_crtc_configure_crc_source(crtc, crtc_state, source)) {
		ret = -EINVAL;
		goto cleanup;
	}

	if (!enabled && enable) {
		if (dm_is_crc_source_dprx(source)) {
			if (drm_dp_start_crc(aux, crtc)) {
				DRM_DEBUG_DRIVER("dp start crc failed\n");
				ret = -EINVAL;
				goto cleanup;
			}
		}
	} else if (enabled && !enable) {
		drm_crtc_vblank_put(crtc);
		if (dm_is_crc_source_dprx(source)) {
			if (drm_dp_stop_crc(aux)) {
				DRM_DEBUG_DRIVER("dp stop crc failed\n");
				ret = -EINVAL;
				goto cleanup;
			}
		}
	}

	spin_lock_irq(&drm_dev->event_lock);
	acrtc->dm_irq_params.crc_src = source;
	spin_unlock_irq(&drm_dev->event_lock);

	/* Reset crc_skipped on dm state */
	crtc_state->crc_skip_count = 0;

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	/* Initialize phy id mapping table for secure display */
	if (dm->secure_display_ctx.op_mode == LEGACY_MODE &&
	    !dm->secure_display_ctx.phy_mapping_updated)
		update_phy_id_mapping(adev);
#endif

cleanup:
	if (commit)
		drm_crtc_commit_put(commit);

	drm_modeset_unlock(&crtc->mutex);

	return ret;
}

/**
 * amdgpu_dm_crtc_handle_crc_irq - Report to DRM the CRC on the given CRTC.
 * @crtc: DRM CRTC object.
 *
 * This function should be called at the end of a vblank, when the fb has been
 * fully processed through the pipe.
 */
void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc)
{
	struct dm_crtc_state *crtc_state;
	struct dc_stream_state *stream_state;
	struct drm_device *drm_dev = NULL;
	enum amdgpu_dm_pipe_crc_source cur_crc_src;
	struct amdgpu_crtc *acrtc = NULL;
	uint32_t crcs[3];
	unsigned long flags;

	if (crtc == NULL)
		return;

	crtc_state = to_dm_crtc_state(crtc->state);
	stream_state = crtc_state->stream;
	acrtc = to_amdgpu_crtc(crtc);
	drm_dev = crtc->dev;

	spin_lock_irqsave(&drm_dev->event_lock, flags);
	cur_crc_src = acrtc->dm_irq_params.crc_src;
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);

	/* Early return if CRC capture is not enabled. */
	if (!amdgpu_dm_is_valid_crc_source(cur_crc_src))
		return;

	/*
	 * Since flipping and crc enablement happen asynchronously, we - more
	 * often than not - will be returning an 'uncooked' crc on first frame.
	 * Probably because hw isn't ready yet. For added security, skip the
	 * first two CRC values.
	 */
	if (crtc_state->crc_skip_count < 2) {
		crtc_state->crc_skip_count += 1;
		return;
	}

	if (dm_is_crc_source_crtc(cur_crc_src)) {
		if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state, 0,
				       &crcs[0], &crcs[1], &crcs[2]))
			return;

		drm_crtc_add_crc_entry(crtc, true,
				       drm_crtc_accurate_vblank_count(crtc), crcs);
	}
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)
{
	struct drm_device *drm_dev = NULL;
	enum amdgpu_dm_pipe_crc_source cur_crc_src;
	struct amdgpu_crtc *acrtc = NULL;
	struct amdgpu_device *adev = NULL;
	struct secure_display_crtc_context *crtc_ctx = NULL;
	bool reset_crc_frame_count[MAX_CRC_WINDOW_NUM] = {false};
	uint32_t crc_r[MAX_CRC_WINDOW_NUM] = {0};
	uint32_t crc_g[MAX_CRC_WINDOW_NUM] = {0};
	uint32_t crc_b[MAX_CRC_WINDOW_NUM] = {0};
	unsigned long flags1;
	bool forward_roi_change = false;
	bool notify_ta = false;
	bool all_crc_ready = true;
	struct dc_stream_state *stream_state;
	int i;

	if (crtc == NULL)
		return;

	acrtc = to_amdgpu_crtc(crtc);
	adev = drm_to_adev(crtc->dev);
	drm_dev = crtc->dev;
	stream_state = to_dm_crtc_state(crtc->state)->stream;

	spin_lock_irqsave(&drm_dev->event_lock, flags1);
	cur_crc_src = acrtc->dm_irq_params.crc_src;

	/* Early return if CRC capture is not enabled. */
	if (!amdgpu_dm_is_valid_crc_source(cur_crc_src) ||
	    !dm_is_crc_source_crtc(cur_crc_src)) {
		spin_unlock_irqrestore(&drm_dev->event_lock, flags1);
		return;
	}

	if (!acrtc->dm_irq_params.crc_window_activated) {
		spin_unlock_irqrestore(&drm_dev->event_lock, flags1);
		return;
	}

	crtc_ctx = &adev->dm.secure_display_ctx.crtc_ctx[acrtc->crtc_id];
	if (WARN_ON(crtc_ctx->crtc != crtc)) {
		/* The crtc was set when the secure_display_crtc_context was
		 * created; it is not expected to change here.
		 */
		crtc_ctx->crtc = crtc;
	}

	for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
		struct crc_params crc_window = {
			.windowa_x_start = acrtc->dm_irq_params.window_param[i].x_start,
			.windowa_y_start = acrtc->dm_irq_params.window_param[i].y_start,
			.windowa_x_end = acrtc->dm_irq_params.window_param[i].x_end,
			.windowa_y_end = acrtc->dm_irq_params.window_param[i].y_end,
			.windowb_x_start = acrtc->dm_irq_params.window_param[i].x_start,
			.windowb_y_start = acrtc->dm_irq_params.window_param[i].y_start,
			.windowb_x_end = acrtc->dm_irq_params.window_param[i].x_end,
			.windowb_y_end = acrtc->dm_irq_params.window_param[i].y_end,
		};

		crtc_ctx->roi[i].enable = acrtc->dm_irq_params.window_param[i].enable;

		if (!acrtc->dm_irq_params.window_param[i].enable) {
			crtc_ctx->crc_info.crc[i].crc_ready = false;
			continue;
		}

		if (acrtc->dm_irq_params.window_param[i].skip_frame_cnt) {
			acrtc->dm_irq_params.window_param[i].skip_frame_cnt -= 1;
			crtc_ctx->crc_info.crc[i].crc_ready = false;
			continue;
		}

		if (acrtc->dm_irq_params.window_param[i].update_win) {
			crtc_ctx->roi[i].rect.x = crc_window.windowa_x_start;
			crtc_ctx->roi[i].rect.y = crc_window.windowa_y_start;
			crtc_ctx->roi[i].rect.width = crc_window.windowa_x_end -
				crc_window.windowa_x_start;
			crtc_ctx->roi[i].rect.height = crc_window.windowa_y_end -
				crc_window.windowa_y_start;

			if (adev->dm.secure_display_ctx.op_mode == LEGACY_MODE)
				/* forward task to dmub to update ROI */
				forward_roi_change = true;
			else if (adev->dm.secure_display_ctx.op_mode == DISPLAY_CRC_MODE)
				/* update ROI via dm */
				dc_stream_configure_crc(stream_state->ctx->dc, stream_state,
							&crc_window, true, true, i, false);

			reset_crc_frame_count[i] = true;

			acrtc->dm_irq_params.window_param[i].update_win = false;

			/* Statically skip 1 frame, because we may need to wait for
			 * the following before sending the ROI to dmub:
			 * 1. The work is deferred to the system workqueue.
			 * 2. We may need to wait for dc_lock before accessing dmub.
			 */
			acrtc->dm_irq_params.window_param[i].skip_frame_cnt = 1;
			crtc_ctx->crc_info.crc[i].crc_ready = false;
		} else {
			if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state, i,
					       &crc_r[i], &crc_g[i], &crc_b[i]))
				DRM_ERROR("Secure Display: failed to get crc from engine %d\n", i);

			if (adev->dm.secure_display_ctx.op_mode == LEGACY_MODE)
				/* forward task to psp to read ROI/CRC and output via I2C */
				notify_ta = true;
			else if (adev->dm.secure_display_ctx.op_mode == DISPLAY_CRC_MODE)
				/* Keep overwriting to avoid the ROI window being changed. */
				dc_stream_configure_crc(stream_state->ctx->dc, stream_state,
							&crc_window, true, true, i, false);

			/* crc ready for psp to read out */
			crtc_ctx->crc_info.crc[i].crc_ready = true;
		}
	}

	spin_unlock_irqrestore(&drm_dev->event_lock, flags1);

	if (forward_roi_change)
		schedule_work(&crtc_ctx->forward_roi_work);

	if (notify_ta)
		schedule_work(&crtc_ctx->notify_ta_work);

	spin_lock_irqsave(&crtc_ctx->crc_info.lock, flags1);
	for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
		crtc_ctx->crc_info.crc[i].crc_R = crc_r[i];
		crtc_ctx->crc_info.crc[i].crc_G = crc_g[i];
		crtc_ctx->crc_info.crc[i].crc_B = crc_b[i];

		if (!crtc_ctx->roi[i].enable) {
			crtc_ctx->crc_info.crc[i].frame_count = 0;
			continue;
		}

		if (!crtc_ctx->crc_info.crc[i].crc_ready)
			all_crc_ready = false;

		if (reset_crc_frame_count[i] || crtc_ctx->crc_info.crc[i].frame_count == UINT_MAX)
			/* Reset the reference frame count after the user updates
			 * the ROI or when it reaches the maximum value.
			 */
			crtc_ctx->crc_info.crc[i].frame_count = 0;
		else
			crtc_ctx->crc_info.crc[i].frame_count += 1;
	}
	spin_unlock_irqrestore(&crtc_ctx->crc_info.lock, flags1);

	if (all_crc_ready)
		complete_all(&crtc_ctx->crc_info.completion);
}

void amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device *adev)
{
	struct secure_display_crtc_context *crtc_ctx = NULL;
	int i;

	crtc_ctx = kcalloc(adev->mode_info.num_crtc,
			   sizeof(struct secure_display_crtc_context),
			   GFP_KERNEL);

	if (!crtc_ctx) {
		adev->dm.secure_display_ctx.crtc_ctx = NULL;
		return;
	}

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		INIT_WORK(&crtc_ctx[i].forward_roi_work, amdgpu_dm_forward_crc_window);
		INIT_WORK(&crtc_ctx[i].notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read);
		crtc_ctx[i].crtc = &adev->mode_info.crtcs[i]->base;
		spin_lock_init(&crtc_ctx[i].crc_info.lock);
	}

	adev->dm.secure_display_ctx.crtc_ctx = crtc_ctx;

	adev->dm.secure_display_ctx.op_mode = DISPLAY_CRC_MODE;
}
#endif
951#endif