// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fourcc.h>

#include "amdgpu.h"
#include "dal_asic_id.h"
#include "amdgpu_display.h"
#include "amdgpu_dm_trace.h"
#include "amdgpu_dm_plane.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
/*
 * TODO: this table currently contains RGB formats only.
 * For future use cases we should either initialize it dynamically based on
 * plane capabilities, or initialize it to all formats so the internal DRM
 * check succeeds, and let DC implement the proper check.
 */
static const uint32_t rgb_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XRGB16161616,
	DRM_FORMAT_XBGR16161616,
	DRM_FORMAT_ARGB16161616,
	DRM_FORMAT_ABGR16161616,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB565,
};

static const uint32_t overlay_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_NV21,
	DRM_FORMAT_NV12,
	DRM_FORMAT_P010
};

static const uint32_t video_formats[] = {
	DRM_FORMAT_NV21,
	DRM_FORMAT_NV12,
	DRM_FORMAT_P010
};

static const u32 cursor_formats[] = {
	DRM_FORMAT_ARGB8888
};

enum dm_micro_swizzle {
	MICRO_SWIZZLE_Z = 0,
	MICRO_SWIZZLE_S = 1,
	MICRO_SWIZZLE_D = 2,
	MICRO_SWIZZLE_R = 3
};

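/*
 * Resolve the DRM format info for a framebuffer creation request, taking
 * the requested format modifier into account.
 */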
const struct drm_format_info *amdgpu_dm_plane_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
{
	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
}

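/*
 * Derive DC blending settings from the DRM plane state: per-pixel alpha
 * (premultiplied vs. coverage) for formats that carry an alpha channel,
 * plus the 8-bit global plane alpha reduced from DRM's 16-bit value.
 */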
void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
						    bool *per_pixel_alpha, bool *pre_multiplied_alpha,
						    bool *global_alpha, int *global_alpha_value)
{
	*per_pixel_alpha = false;
	*pre_multiplied_alpha = true;
	*global_alpha = false;
	*global_alpha_value = 0xff;

	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
	    plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
		static const uint32_t alpha_formats[] = {
			DRM_FORMAT_ARGB8888,
			DRM_FORMAT_RGBA8888,
			DRM_FORMAT_ABGR8888,
			DRM_FORMAT_ARGB2101010,
			DRM_FORMAT_ABGR2101010,
			DRM_FORMAT_ARGB16161616,
			DRM_FORMAT_ABGR16161616,
			DRM_FORMAT_ARGB16161616F,
		};
		uint32_t format = plane_state->fb->format->format;
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
			if (format == alpha_formats[i]) {
				*per_pixel_alpha = true;
				break;
			}
		}

		if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
			*pre_multiplied_alpha = false;
	}

	if (plane_state->alpha < 0xffff) {
		*global_alpha = true;
		*global_alpha_value = plane_state->alpha >> 8;
	}
}

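/*
 * Append a modifier to the dynamically sized list, doubling the capacity
 * when full. On allocation failure the list is freed and *mods is set to
 * NULL, which callers later turn into -ENOMEM.
 */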
static void amdgpu_dm_plane_add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
{
	if (!*mods)
		return;

	if (*cap - *size < 1) {
		uint64_t new_cap = *cap * 2;
		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);

		if (!new_mods) {
			kfree(*mods);
			*mods = NULL;
			return;
		}

		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
		kfree(*mods);
		*mods = new_mods;
		*cap = new_cap;
	}

	(*mods)[*size] = mod;
	*size += 1;
}

static bool amdgpu_dm_plane_modifier_has_dcc(uint64_t modifier)
{
	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
}

static unsigned int amdgpu_dm_plane_modifier_gfx9_swizzle_mode(uint64_t modifier)
{
	if (modifier == DRM_FORMAT_MOD_LINEAR)
		return 0;

	return AMD_FMT_MOD_GET(TILE, modifier);
}

static void amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
							     uint64_t tiling_flags)
{
	/* Fill GFX8 params */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		/* XXX fix me for VI */
		tiling_info->gfx8.num_banks = num_banks;
		tiling_info->gfx8.array_mode =
				DC_ARRAY_2D_TILED_THIN1;
		tiling_info->gfx8.tile_split = tile_split;
		tiling_info->gfx8.bank_width = bankw;
		tiling_info->gfx8.bank_height = bankh;
		tiling_info->gfx8.tile_aspect = mtaspect;
		tiling_info->gfx8.tile_mode =
				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
			== DC_ARRAY_1D_TILED_THIN1) {
		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
	}

	tiling_info->gfx8.pipe_config =
			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
}

static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
							      union dc_tiling_info *tiling_info)
{
	/* Fill GFX9 params */
	tiling_info->gfx9.num_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;
	tiling_info->gfx9.num_banks =
		adev->gfx.config.gb_addr_config_fields.num_banks;
	tiling_info->gfx9.pipe_interleave =
		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
	tiling_info->gfx9.num_shader_engines =
		adev->gfx.config.gb_addr_config_fields.num_se;
	tiling_info->gfx9.max_compressed_frags =
		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
	tiling_info->gfx9.num_rb_per_se =
		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
	tiling_info->gfx9.shaderEnable = 1;
	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0))
		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
}

static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
								union dc_tiling_info *tiling_info,
								uint64_t modifier)
{
	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
	unsigned int pipes_log2;

	pipes_log2 = min(5u, mod_pipe_xor_bits);

	amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(adev, tiling_info);

	if (!IS_AMD_FMT_MOD(modifier))
		return;

	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);

	if (adev->family >= AMDGPU_FAMILY_NV) {
		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
	} else {
		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;

		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
	}
}

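/*
 * Ask DC whether the surface can actually be DCC-compressed with the given
 * format, size, and swizzle; reject video formats and parameter combinations
 * the DCC hardware cannot handle.
 */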
static int amdgpu_dm_plane_validate_dcc(struct amdgpu_device *adev,
					const enum surface_pixel_format format,
					const enum dc_rotation_angle rotation,
					const union dc_tiling_info *tiling_info,
					const struct dc_plane_dcc_param *dcc,
					const struct dc_plane_address *address,
					const struct plane_size *plane_size)
{
	struct dc *dc = adev->dm.dc;
	struct dc_dcc_surface_param input;
	struct dc_surface_dcc_cap output;

	memset(&input, 0, sizeof(input));
	memset(&output, 0, sizeof(output));

	if (!dcc->enable)
		return 0;

	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
	    !dc->cap_funcs.get_dcc_compression_cap)
		return -EINVAL;

	input.format = format;
	input.surface_size.width = plane_size->surface_size.width;
	input.surface_size.height = plane_size->surface_size.height;
	input.swizzle_mode = tiling_info->gfx9.swizzle;

	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
		input.scan = SCAN_DIRECTION_HORIZONTAL;
	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
		input.scan = SCAN_DIRECTION_VERTICAL;

	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
		return -EINVAL;

	if (!output.capable)
		return -EINVAL;

	if (dcc->independent_64b_blks == 0 &&
	    output.grph.rgb.independent_64b_blks != 0)
		return -EINVAL;

	return 0;
}

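/*
 * Translate a GFX9+ format modifier into DC tiling and DCC parameters,
 * including the DCC metadata address taken from the framebuffer's second
 * plane, then validate the result against DC's DCC capabilities.
 */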
static int amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
								     const struct amdgpu_framebuffer *afb,
								     const enum surface_pixel_format format,
								     const enum dc_rotation_angle rotation,
								     const struct plane_size *plane_size,
								     union dc_tiling_info *tiling_info,
								     struct dc_plane_dcc_param *dcc,
								     struct dc_plane_address *address,
								     const bool force_disable_dcc)
{
	const uint64_t modifier = afb->base.modifier;
	int ret = 0;

	amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
	tiling_info->gfx9.swizzle = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier);

	if (amdgpu_dm_plane_modifier_has_dcc(modifier) && !force_disable_dcc) {
		uint64_t dcc_address = afb->address + afb->base.offsets[1];
		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);

		dcc->enable = 1;
		dcc->meta_pitch = afb->base.pitches[1];
		dcc->independent_64b_blks = independent_64b_blks;
		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
			if (independent_64b_blks && independent_128b_blks)
				dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
			else if (independent_128b_blks)
				dcc->dcc_ind_blk = hubp_ind_block_128b;
			else if (independent_64b_blks && !independent_128b_blks)
				dcc->dcc_ind_blk = hubp_ind_block_64b;
			else
				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
		} else {
			if (independent_64b_blks)
				dcc->dcc_ind_blk = hubp_ind_block_64b;
			else
				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
		}

		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
	}

	ret = amdgpu_dm_plane_validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
	if (ret)
		drm_dbg_kms(adev_to_drm(adev), "amdgpu_dm_plane_validate_dcc: returned error: %d\n", ret);

	return ret;
}

static int amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(struct amdgpu_device *adev,
								      const struct amdgpu_framebuffer *afb,
								      const enum surface_pixel_format format,
								      const enum dc_rotation_angle rotation,
								      const struct plane_size *plane_size,
								      union dc_tiling_info *tiling_info,
								      struct dc_plane_dcc_param *dcc,
								      struct dc_plane_address *address,
								      const bool force_disable_dcc)
{
	const uint64_t modifier = afb->base.modifier;
	int ret = 0;

	/* TODO: Most of this function shouldn't be needed on GFX12. */
	amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(adev, tiling_info);

	tiling_info->gfx9.swizzle = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier);

	if (amdgpu_dm_plane_modifier_has_dcc(modifier) && !force_disable_dcc) {
		int max_compressed_block = AMD_FMT_MOD_GET(DCC_MAX_COMPRESSED_BLOCK, modifier);

		dcc->enable = 1;
		dcc->independent_64b_blks = max_compressed_block == 0;

		if (max_compressed_block == 0)
			dcc->dcc_ind_blk = hubp_ind_block_64b;
		else if (max_compressed_block == 1)
			dcc->dcc_ind_blk = hubp_ind_block_128b;
		else
			dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
	}

	/* TODO: This seems wrong because there is no DCC plane on GFX12. */
	ret = amdgpu_dm_plane_validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
	if (ret)
		drm_dbg_kms(adev_to_drm(adev), "amdgpu_dm_plane_validate_dcc: returned error: %d\n", ret);

	return ret;
}

static void amdgpu_dm_plane_add_gfx10_1_modifiers(const struct amdgpu_device *adev,
						  uint64_t **mods,
						  uint64_t *size,
						  uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);

	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
			AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
			AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
			AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			AMD_FMT_MOD_SET(DCC, 1) |
			AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
			AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
			AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
			AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
			AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			AMD_FMT_MOD_SET(DCC, 1) |
			AMD_FMT_MOD_SET(DCC_RETILE, 1) |
			AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
			AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
			AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
			AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
			AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));

	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
			AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
			AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));

	/* Only supported for 64bpp, will be filtered in amdgpu_dm_plane_format_mod_supported */
	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
			AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
			AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
			AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
			AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}

static void amdgpu_dm_plane_add_gfx9_modifiers(const struct amdgpu_device *adev,
					       uint64_t **mods,
					       uint64_t *size,
					       uint64_t *capacity)
{
	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
	int pipe_xor_bits = min(8, pipes +
				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
	int bank_xor_bits = min(8 - pipe_xor_bits,
				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);

	if (adev->family == AMDGPU_FAMILY_RV) {
		/* Raven2 and later */
		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;

		/*
		 * No _D DCC swizzles yet because we only allow 32bpp, which
		 * doesn't support _D on DCN
		 */

		if (has_constant_encode) {
			amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
					AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
					AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
					AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
					AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
					AMD_FMT_MOD_SET(DCC, 1) |
					AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
					AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
					AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
		}

		amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
				AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
				AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
				AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
				AMD_FMT_MOD_SET(DCC, 1) |
				AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
				AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));

		if (has_constant_encode) {
			amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
					AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
					AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
					AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
					AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
					AMD_FMT_MOD_SET(DCC, 1) |
					AMD_FMT_MOD_SET(DCC_RETILE, 1) |
					AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
					AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
					AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
					AMD_FMT_MOD_SET(RB, rb) |
					AMD_FMT_MOD_SET(PIPE, pipes));
		}

		amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
				AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
				AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
				AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
				AMD_FMT_MOD_SET(DCC, 1) |
				AMD_FMT_MOD_SET(DCC_RETILE, 1) |
				AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
				AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
				AMD_FMT_MOD_SET(RB, rb) |
				AMD_FMT_MOD_SET(PIPE, pipes));
	}

	/*
	 * Only supported for 64bpp on Raven, will be filtered on format in
	 * amdgpu_dm_plane_format_mod_supported.
	 */
	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
			AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
			AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));

	if (adev->family == AMDGPU_FAMILY_RV) {
		amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
				AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
				AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
				AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
	}

	/*
	 * Only supported for 64bpp on Raven, will be filtered on format in
	 * amdgpu_dm_plane_format_mod_supported.
	 */
	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
			AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
			AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	if (adev->family == AMDGPU_FAMILY_RV) {
		amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
				AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
				AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
	}
}

static void amdgpu_dm_plane_add_gfx10_3_modifiers(const struct amdgpu_device *adev,
						  uint64_t **mods,
						  uint64_t *size,
						  uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);

	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
			AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
			AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
			AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			AMD_FMT_MOD_SET(PACKERS, pkrs) |
			AMD_FMT_MOD_SET(DCC, 1) |
			AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
			AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
			AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
			AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
			AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
			AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			AMD_FMT_MOD_SET(PACKERS, pkrs) |
			AMD_FMT_MOD_SET(DCC, 1) |
			AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
			AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
			AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));

	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
			AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
			AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
			AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			AMD_FMT_MOD_SET(PACKERS, pkrs) |
			AMD_FMT_MOD_SET(DCC, 1) |
			AMD_FMT_MOD_SET(DCC_RETILE, 1) |
			AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
			AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
			AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
			AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
			AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
			AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			AMD_FMT_MOD_SET(PACKERS, pkrs) |
			AMD_FMT_MOD_SET(DCC, 1) |
			AMD_FMT_MOD_SET(DCC_RETILE, 1) |
			AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
			AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
			AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));

	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
			AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
			AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
			AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			AMD_FMT_MOD_SET(PACKERS, pkrs));

	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
			AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
			AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			AMD_FMT_MOD_SET(PACKERS, pkrs));

	/* Only supported for 64bpp, will be filtered in amdgpu_dm_plane_format_mod_supported */
	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
			AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
			AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
			AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
			AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}

static void amdgpu_dm_plane_add_gfx11_modifiers(struct amdgpu_device *adev,
						uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int num_pipes = 0;
	int pipe_xor_bits = 0;
	int num_pkrs = 0;
	int pkrs = 0;
	u32 gb_addr_config;
	u8 i = 0;
	unsigned int swizzle_r_x;
	uint64_t modifier_r_x;
	uint64_t modifier_dcc_best;
	uint64_t modifier_dcc_4k;

	/* TODO: GFX11 IP HW init hasn't finished yet, so reading
	 * adev->gfx.config.gb_addr_config_fields.num_{pkrs,pipes} returns zero.
	 */
	gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
	ASSERT(gb_addr_config != 0);

	num_pkrs = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
	pkrs = ilog2(num_pkrs);
	num_pipes = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PIPES);
	pipe_xor_bits = ilog2(num_pipes);

	for (i = 0; i < 2; i++) {
		/* Insert the best one first. */
		/* R_X swizzle modes are the best for rendering and DCC requires them. */
		if (num_pipes > 16)
			swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX11_256K_R_X : AMD_FMT_MOD_TILE_GFX9_64K_R_X;
		else
			swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX9_64K_R_X : AMD_FMT_MOD_TILE_GFX11_256K_R_X;

		modifier_r_x = AMD_FMT_MOD |
			AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
			AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			AMD_FMT_MOD_SET(TILE, swizzle_r_x) |
			AMD_FMT_MOD_SET(PACKERS, pkrs);

		/* DCC_CONSTANT_ENCODE is not set because it can't vary with gfx11 (it's implied to be 1). */
		modifier_dcc_best = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
			AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) |
			AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
			AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B);

		/* DCC settings for 4K and greater resolutions. (required by display hw) */
		modifier_dcc_4k = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
			AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
			AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B);

		amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_best);
		amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_4k);

		amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1));
		amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1));

		amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_r_x);
	}

	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
			AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
			AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D));
}

static void amdgpu_dm_plane_add_gfx12_modifiers(struct amdgpu_device *adev,
						uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	uint64_t ver = AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX12);
	uint64_t mod_256k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_256K_2D);
	uint64_t mod_64k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_64K_2D);
	uint64_t mod_4k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_4K_2D);
	uint64_t mod_256b = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_256B_2D);
	uint64_t dcc = ver | AMD_FMT_MOD_SET(DCC, 1);
	uint8_t max_comp_block[] = {1, 0};
	uint64_t max_comp_block_mod[ARRAY_SIZE(max_comp_block)] = {0};
	uint8_t i = 0, j = 0;
	uint64_t gfx12_modifiers[] = {mod_256k, mod_64k, mod_4k, mod_256b, DRM_FORMAT_MOD_LINEAR};

	for (i = 0; i < ARRAY_SIZE(max_comp_block); i++)
		max_comp_block_mod[i] = AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, max_comp_block[i]);

	/* With DCC: the best choice should be kept first, so add all 256k modifiers with the
	 * different max compressed block sizes first, then move on to the next smaller sized
	 * layouts. The linear modifier must not be added here, hence the size - 1 bound on
	 * the loop.
	 */
	for (j = 0; j < ARRAY_SIZE(gfx12_modifiers) - 1; j++)
		for (i = 0; i < ARRAY_SIZE(max_comp_block); i++)
			amdgpu_dm_plane_add_modifier(mods, size, capacity,
						     ver | dcc | max_comp_block_mod[i] | gfx12_modifiers[j]);

	/* Without DCC: add all modifiers, including linear, at the end. */
	for (i = 0; i < ARRAY_SIZE(gfx12_modifiers); i++)
		amdgpu_dm_plane_add_modifier(mods, size, capacity, gfx12_modifiers[i]);
}

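/*
 * Build the modifier list advertised for a plane, ordered from most to least
 * preferred and terminated by LINEAR plus the INVALID sentinel. Cursor planes
 * only get LINEAR; pre-GFX9 parts get no list since modifiers are not hooked
 * up for them.
 */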
static int amdgpu_dm_plane_get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
{
	uint64_t size = 0, capacity = 128;
	*mods = NULL;

	/* We have not hooked up any pre-GFX9 modifiers. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return 0;

	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);

	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
		amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
		amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
		return *mods ? 0 : -ENOMEM;
	}

	switch (adev->family) {
	case AMDGPU_FAMILY_AI:
	case AMDGPU_FAMILY_RV:
		amdgpu_dm_plane_add_gfx9_modifiers(adev, mods, &size, &capacity);
		break;
	case AMDGPU_FAMILY_NV:
	case AMDGPU_FAMILY_VGH:
	case AMDGPU_FAMILY_YC:
	case AMDGPU_FAMILY_GC_10_3_6:
	case AMDGPU_FAMILY_GC_10_3_7:
		if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0))
			amdgpu_dm_plane_add_gfx10_3_modifiers(adev, mods, &size, &capacity);
		else
			amdgpu_dm_plane_add_gfx10_1_modifiers(adev, mods, &size, &capacity);
		break;
	case AMDGPU_FAMILY_GC_11_0_0:
	case AMDGPU_FAMILY_GC_11_0_1:
	case AMDGPU_FAMILY_GC_11_5_0:
		amdgpu_dm_plane_add_gfx11_modifiers(adev, mods, &size, &capacity);
		break;
	case AMDGPU_FAMILY_GC_12_0_0:
		amdgpu_dm_plane_add_gfx12_modifiers(adev, mods, &size, &capacity);
		break;
	}

	amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);

	/* INVALID marks the end of the list. */
	amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);

	if (!*mods)
		return -ENOMEM;

	return 0;
}

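/*
 * Fill @formats with the pixel formats supported by this plane type, capped
 * at @max_formats. Universal DCN planes get the full RGB set plus any
 * YUV/FP16 formats the DC plane caps advertise.
 */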
static int amdgpu_dm_plane_get_plane_formats(const struct drm_plane *plane,
					     const struct dc_plane_cap *plane_cap,
					     uint32_t *formats, int max_formats)
{
	int i, num_formats = 0;

	/*
	 * TODO: Query support for each group of formats directly from
	 * DC plane caps. This will require adding more formats to the
	 * caps list.
	 */

	if (plane->type == DRM_PLANE_TYPE_PRIMARY ||
	    (plane_cap && plane_cap->type == DC_PLANE_TYPE_DCN_UNIVERSAL && plane->type != DRM_PLANE_TYPE_CURSOR)) {
		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = rgb_formats[i];
		}

		if (plane_cap && plane_cap->pixel_format_support.nv12)
			formats[num_formats++] = DRM_FORMAT_NV12;
		if (plane_cap && plane_cap->pixel_format_support.p010)
			formats[num_formats++] = DRM_FORMAT_P010;
		if (plane_cap && plane_cap->pixel_format_support.fp16) {
			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
		}
	} else {
		switch (plane->type) {
		case DRM_PLANE_TYPE_OVERLAY:
			for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
				if (num_formats >= max_formats)
					break;

				formats[num_formats++] = overlay_formats[i];
			}
			break;

		case DRM_PLANE_TYPE_CURSOR:
			for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
				if (num_formats >= max_formats)
					break;

				formats[num_formats++] = cursor_formats[i];
			}
			break;

		default:
			break;
		}
	}

	return num_formats;
}

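/*
 * Fill DC surface size, pitch, address, tiling, and DCC state from an amdgpu
 * framebuffer. Video formats get separate luma/chroma addressing; tiling is
 * derived from modifiers on GFX9+ and from legacy tiling flags on older ASICs.
 */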
int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev,
						 const struct amdgpu_framebuffer *afb,
						 const enum surface_pixel_format format,
						 const enum dc_rotation_angle rotation,
						 const uint64_t tiling_flags,
						 union dc_tiling_info *tiling_info,
						 struct plane_size *plane_size,
						 struct dc_plane_dcc_param *dcc,
						 struct dc_plane_address *address,
						 bool tmz_surface,
						 bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = &afb->base;
	int ret;

	memset(tiling_info, 0, sizeof(*tiling_info));
	memset(plane_size, 0, sizeof(*plane_size));
	memset(dcc, 0, sizeof(*dcc));
	memset(address, 0, sizeof(*address));

	address->tmz_surface = tmz_surface;

	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
		uint64_t addr = afb->address + fb->offsets[0];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		address->type = PLN_ADDR_TYPE_GRAPHICS;
		address->grph.addr.low_part = lower_32_bits(addr);
		address->grph.addr.high_part = upper_32_bits(addr);
	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
		uint64_t luma_addr = afb->address + fb->offsets[0];
		uint64_t chroma_addr = afb->address + fb->offsets[1];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		plane_size->chroma_size.x = 0;
		plane_size->chroma_size.y = 0;
		/* TODO: set these based on surface format */
		plane_size->chroma_size.width = fb->width / 2;
		plane_size->chroma_size.height = fb->height / 2;

		plane_size->chroma_pitch =
			fb->pitches[1] / fb->format->cpp[1];

		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
		address->video_progressive.luma_addr.low_part =
			lower_32_bits(luma_addr);
		address->video_progressive.luma_addr.high_part =
			upper_32_bits(luma_addr);
		address->video_progressive.chroma_addr.low_part =
			lower_32_bits(chroma_addr);
		address->video_progressive.chroma_addr.high_part =
			upper_32_bits(chroma_addr);
	}

	if (adev->family >= AMDGPU_FAMILY_GC_12_0_0) {
		ret = amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(adev, afb, format,
										 rotation, plane_size,
										 tiling_info, dcc,
										 address,
										 force_disable_dcc);
		if (ret)
			return ret;
	} else if (adev->family >= AMDGPU_FAMILY_AI) {
		ret = amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
										rotation, plane_size,
										tiling_info, dcc,
										address,
										force_disable_dcc);
		if (ret)
			return ret;
	} else {
		amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
	}

	return 0;
}

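/*
 * Pin the framebuffer BO into a displayable domain and record its GPU
 * address ahead of the commit; also fills buffer attributes for freshly
 * created planes, which DC is not using yet.
 */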
static int amdgpu_dm_plane_helper_prepare_fb(struct drm_plane *plane,
					     struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_device *adev;
	struct amdgpu_bo *rbo;
	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
	uint32_t domain;
	int r;

	if (!new_state->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}

	afb = to_amdgpu_framebuffer(new_state->fb);
	obj = drm_gem_fb_get_obj(new_state->fb, 0);
	if (!obj) {
		DRM_ERROR("Failed to get obj from framebuffer\n");
		return -EINVAL;
	}

	rbo = gem_to_amdgpu_bo(obj);
	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
	r = amdgpu_bo_reserve(rbo, true);
	if (r) {
		dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
		return r;
	}

	r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
	if (r) {
		dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
		goto error_unlock;
	}

	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		domain = amdgpu_display_supported_domains(adev, rbo->flags);
	else
		domain = AMDGPU_GEM_DOMAIN_VRAM;

	r = amdgpu_bo_pin(rbo, domain);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		goto error_unlock;
	}

	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
	if (unlikely(r != 0)) {
		DRM_ERROR("%p bind failed\n", rbo);
		goto error_unpin;
	}

	r = drm_gem_plane_helper_prepare_fb(plane, new_state);
	if (unlikely(r != 0))
		goto error_unpin;

	amdgpu_bo_unreserve(rbo);

	afb->address = amdgpu_bo_gpu_offset(rbo);

	amdgpu_bo_ref(rbo);

	/*
	 * We don't do surface updates on planes that have been newly created,
	 * but we also don't have the afb->address during atomic check.
	 *
	 * Fill in buffer attributes depending on the address here, but only on
	 * newly created planes since they're not being used by DC yet and this
	 * won't modify global state.
	 */
	dm_plane_state_old = to_dm_plane_state(plane->state);
	dm_plane_state_new = to_dm_plane_state(new_state);

	if (dm_plane_state_new->dc_state &&
	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
		struct dc_plane_state *plane_state =
			dm_plane_state_new->dc_state;
		bool force_disable_dcc = !plane_state->dcc.enable;

		amdgpu_dm_plane_fill_plane_buffer_attributes(
			adev, afb, plane_state->format, plane_state->rotation,
			afb->tiling_flags,
			&plane_state->tiling_info, &plane_state->plane_size,
			&plane_state->dcc, &plane_state->address,
			afb->tmz_surface, force_disable_dcc);
	}

	return 0;

error_unpin:
	amdgpu_bo_unpin(rbo);

error_unlock:
	amdgpu_bo_unreserve(rbo);
	return r;
}

static void amdgpu_dm_plane_helper_cleanup_fb(struct drm_plane *plane,
					      struct drm_plane_state *old_state)
{
	struct amdgpu_bo *rbo;
	int r;

	if (!old_state->fb)
		return;

	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r)) {
		DRM_ERROR("failed to reserve rbo before unpin\n");
		return;
	}

	amdgpu_bo_unpin(rbo);
	amdgpu_bo_unreserve(rbo);
	amdgpu_bo_unref(&rbo);
}

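/*
 * Query DC plane caps for the scaling limits of the framebuffer's format
 * class (YUV, FP16, or the ARGB8888 default), in units of 1/1000th; a
 * reported factor of 1 means scaling is not allowed.
 */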
static void amdgpu_dm_plane_get_min_max_dc_plane_scaling(struct drm_device *dev,
							 struct drm_framebuffer *fb,
							 int *min_downscale, int *max_upscale)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];

	switch (fb->format->format) {
	case DRM_FORMAT_P010:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		*max_upscale = plane_cap->max_upscale_factor.nv12;
		*min_downscale = plane_cap->max_downscale_factor.nv12;
		break;

	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		*max_upscale = plane_cap->max_upscale_factor.fp16;
		*min_downscale = plane_cap->max_downscale_factor.fp16;
		break;

	default:
		*max_upscale = plane_cap->max_upscale_factor.argb8888;
		*min_downscale = plane_cap->max_downscale_factor.argb8888;
		break;
	}

	/*
	 * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
	 * scaling factor of 1.0 == 1000 units.
	 */
	if (*max_upscale == 1)
		*max_upscale = 1000;

	if (*min_downscale == 1)
		*min_downscale = 1000;
}

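/*
 * Atomic-check helper: validate the plane's viewport against the CRTC mode
 * and convert DC's dst/src scaling limits (1.0 == 1000) into DRM's 16.16
 * src/dst convention before handing off to the core plane check.
 */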
int amdgpu_dm_plane_helper_check_state(struct drm_plane_state *state,
				       struct drm_crtc_state *new_crtc_state)
{
	struct drm_framebuffer *fb = state->fb;
	int min_downscale, max_upscale;
	int min_scale = 0;
	int max_scale = INT_MAX;

	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
	if (fb && state->crtc) {
		/* Validate viewport to cover the case when only the position changes */
		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
			int viewport_width = state->crtc_w;
			int viewport_height = state->crtc_h;

			if (state->crtc_x < 0)
				viewport_width += state->crtc_x;
			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;

			if (state->crtc_y < 0)
				viewport_height += state->crtc_y;
			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;

			if (viewport_width < 0 || viewport_height < 0) {
				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
				return -EINVAL;
			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
				return -EINVAL;
			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
				return -EINVAL;
			}
		}

		/* Get min/max allowed scaling factors from plane caps. */
		amdgpu_dm_plane_get_min_max_dc_plane_scaling(state->crtc->dev, fb,
							     &min_downscale, &max_upscale);
		/*
		 * Convert to drm convention: 16.16 fixed point, instead of dc's
		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
		 */
		min_scale = (1000 << 16) / max_upscale;
		max_scale = (1000 << 16) / min_downscale;
	}

	return drm_atomic_helper_check_plane_state(
		state, new_crtc_state, min_scale, max_scale, true, true);
}

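/*
 * Convert DRM plane src/dst rectangles (16.16 fixed-point source) into DC
 * scaling info, rejecting degenerate rectangles and scaling ratios outside
 * the per-format caps.
 */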
int amdgpu_dm_plane_fill_dc_scaling_info(struct amdgpu_device *adev,
					 const struct drm_plane_state *state,
					 struct dc_scaling_info *scaling_info)
{
	int scale_w, scale_h, min_downscale, max_upscale;

	memset(scaling_info, 0, sizeof(*scaling_info));

	/* Source is fixed 16.16 but we ignore mantissa for now... */
	scaling_info->src_rect.x = state->src_x >> 16;
	scaling_info->src_rect.y = state->src_y >> 16;

	/*
	 * For reasons we don't (yet) fully understand a non-zero
	 * src_y coordinate into an NV12 buffer can cause a
	 * system hang on DCN1x.
	 * To avoid hangs (and maybe be overly cautious)
	 * let's reject both non-zero src_x and src_y.
	 *
	 * We currently know of only one use-case to reproduce a
	 * scenario with non-zero src_x and src_y for NV12, which
	 * is to gesture the YouTube Android app into full screen
	 * on ChromeOS.
	 */
	if (((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) ||
	     (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1))) &&
	    (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
	     (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
		return -EINVAL;

	scaling_info->src_rect.width = state->src_w >> 16;
	if (scaling_info->src_rect.width == 0)
		return -EINVAL;

	scaling_info->src_rect.height = state->src_h >> 16;
	if (scaling_info->src_rect.height == 0)
		return -EINVAL;

	scaling_info->dst_rect.x = state->crtc_x;
	scaling_info->dst_rect.y = state->crtc_y;

	if (state->crtc_w == 0)
		return -EINVAL;

	scaling_info->dst_rect.width = state->crtc_w;

	if (state->crtc_h == 0)
		return -EINVAL;

	scaling_info->dst_rect.height = state->crtc_h;

	/* DRM doesn't specify clipping on destination output. */
	scaling_info->clip_rect = scaling_info->dst_rect;

	/* Validate scaling per-format with DC plane caps */
	if (state->plane && state->plane->dev && state->fb) {
		amdgpu_dm_plane_get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
							     &min_downscale, &max_upscale);
	} else {
		min_downscale = 250;
		max_upscale = 16000;
	}

	scale_w = scaling_info->dst_rect.width * 1000 /
		  scaling_info->src_rect.width;

	if (scale_w < min_downscale || scale_w > max_upscale)
		return -EINVAL;

	scale_h = scaling_info->dst_rect.height * 1000 /
		  scaling_info->src_rect.height;

	if (scale_h < min_downscale || scale_h > max_upscale)
		return -EINVAL;

	/*
	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
	 * assume reasonable defaults based on the format.
	 */

	return 0;
}

static int amdgpu_dm_plane_atomic_check(struct drm_plane *plane,
					struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	struct dc *dc = adev->dm.dc;
	struct dm_plane_state *dm_plane_state;
	struct dc_scaling_info scaling_info;
	struct drm_crtc_state *new_crtc_state;
	int ret;

	trace_amdgpu_dm_plane_atomic_check(new_plane_state);

	dm_plane_state = to_dm_plane_state(new_plane_state);

	if (!dm_plane_state->dc_state)
		return 0;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state,
					      new_plane_state->crtc);
	if (!new_crtc_state)
		return -EINVAL;

	ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state);
	if (ret)
		return ret;

	ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
	if (ret)
		return ret;

	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
		return 0;

	return -EINVAL;
}

static int amdgpu_dm_plane_atomic_async_check(struct drm_plane *plane,
					      struct drm_atomic_state *state)
{
	struct drm_crtc_state *new_crtc_state;
	struct drm_plane_state *new_plane_state;
	struct dm_crtc_state *dm_new_crtc_state;

	/* Only support async updates on cursor planes. */
	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		return -EINVAL;

	new_plane_state = drm_atomic_get_new_plane_state(state, plane);
	new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
	/* Reject overlay cursors for now. */
	if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE)
		return -EINVAL;

	return 0;
}

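/*
 * Compute the DC cursor position from the plane state, clamping negative
 * coordinates to zero and encoding the clipped amount as the cursor hotspot.
 */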
int amdgpu_dm_plane_get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
					struct dc_cursor_position *position)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int x, y;
	int xorigin = 0, yorigin = 0;

	if (!crtc || !plane->state->fb)
		return 0;

	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
			  __func__,
			  plane->state->crtc_w,
			  plane->state->crtc_h);
		return -EINVAL;
	}

	x = plane->state->crtc_x;
	y = plane->state->crtc_y;

	if (x <= -amdgpu_crtc->max_cursor_width ||
	    y <= -amdgpu_crtc->max_cursor_height)
		return 0;

	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}
	position->enable = true;
	position->translate_by_source = true;
	position->x = x;
	position->y = y;
	position->x_hotspot = xorigin;
	position->y_hotspot = yorigin;

	return 0;
}

void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane,
					  struct drm_plane_state *old_plane_state)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint64_t address = afb ? afb->address : 0;
	struct dc_cursor_position position = {0};
	struct dc_cursor_attributes attributes;
	int ret;

	if (!plane->state->fb && !old_plane_state->fb)
		return;

	drm_dbg_atomic(plane->dev, "crtc_id=%d with size %d to %d\n",
		       amdgpu_crtc->crtc_id, plane->state->crtc_w,
		       plane->state->crtc_h);

	ret = amdgpu_dm_plane_get_cursor_position(plane, crtc, &position);
	if (ret)
		return;

	if (!position.enable) {
		/* turn off cursor */
		if (crtc_state && crtc_state->stream) {
			mutex_lock(&adev->dm.dc_lock);
			dc_stream_program_cursor_position(crtc_state->stream,
							  &position);
			mutex_unlock(&adev->dm.dc_lock);
		}
		return;
	}

	amdgpu_crtc->cursor_width = plane->state->crtc_w;
	amdgpu_crtc->cursor_height = plane->state->crtc_h;

	memset(&attributes, 0, sizeof(attributes));
	attributes.address.high_part = upper_32_bits(address);
	attributes.address.low_part = lower_32_bits(address);
	attributes.width = plane->state->crtc_w;
	attributes.height = plane->state->crtc_h;
	attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle = 0;
	attributes.attribute_flags.value = 0;

	/* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM
	 * legacy gamma setup.
	 */
	if (crtc_state->cm_is_degamma_srgb &&
	    adev->dm.dc->caps.color.dpp.gamma_corr)
		attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1;

	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];

	if (crtc_state->stream) {
		mutex_lock(&adev->dm.dc_lock);
		if (!dc_stream_program_cursor_attributes(crtc_state->stream,
							 &attributes))
			DRM_ERROR("DC failed to set cursor attributes\n");

		if (!dc_stream_program_cursor_position(crtc_state->stream,
						       &position))
			DRM_ERROR("DC failed to set cursor position\n");
		mutex_unlock(&adev->dm.dc_lock);
	}
}

static void amdgpu_dm_plane_atomic_async_update(struct drm_plane *plane,
						struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(state, plane);

	trace_amdgpu_dm_atomic_update_cursor(new_state);

	swap(plane->state->fb, new_state->fb);

	plane->state->src_x = new_state->src_x;
	plane->state->src_y = new_state->src_y;
	plane->state->src_w = new_state->src_w;
	plane->state->src_h = new_state->src_h;
	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;
	plane->state->crtc_w = new_state->crtc_w;
	plane->state->crtc_h = new_state->crtc_h;

	amdgpu_dm_plane_handle_cursor_update(plane, old_state);
}

static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = amdgpu_dm_plane_helper_prepare_fb,
	.cleanup_fb = amdgpu_dm_plane_helper_cleanup_fb,
	.atomic_check = amdgpu_dm_plane_atomic_check,
	.atomic_async_check = amdgpu_dm_plane_atomic_async_check,
	.atomic_async_update = amdgpu_dm_plane_atomic_async_update
};

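/*
 * Reset the plane to default state, including the driver-private color
 * management defaults for the degamma/shaper/blend transfer functions and
 * the HDR multiplier.
 */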
static void amdgpu_dm_plane_drm_plane_reset(struct drm_plane *plane)
{
	struct dm_plane_state *amdgpu_state = NULL;

	if (plane->state)
		plane->funcs->atomic_destroy_state(plane, plane->state);

	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
	WARN_ON(amdgpu_state == NULL);

	if (!amdgpu_state)
		return;

	__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
	amdgpu_state->degamma_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT;
	amdgpu_state->hdr_mult = AMDGPU_HDR_MULT_DEFAULT;
	amdgpu_state->shaper_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT;
	amdgpu_state->blend_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT;
}

static struct drm_plane_state *amdgpu_dm_plane_drm_plane_duplicate_state(struct drm_plane *plane)
{
	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;

	old_dm_plane_state = to_dm_plane_state(plane->state);
	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
	if (!dm_plane_state)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);

	if (old_dm_plane_state->dc_state) {
		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
		dc_plane_state_retain(dm_plane_state->dc_state);
	}

	if (old_dm_plane_state->degamma_lut)
		dm_plane_state->degamma_lut =
			drm_property_blob_get(old_dm_plane_state->degamma_lut);
	if (old_dm_plane_state->ctm)
		dm_plane_state->ctm =
			drm_property_blob_get(old_dm_plane_state->ctm);
	if (old_dm_plane_state->shaper_lut)
		dm_plane_state->shaper_lut =
			drm_property_blob_get(old_dm_plane_state->shaper_lut);
	if (old_dm_plane_state->lut3d)
		dm_plane_state->lut3d =
			drm_property_blob_get(old_dm_plane_state->lut3d);
	if (old_dm_plane_state->blend_lut)
		dm_plane_state->blend_lut =
			drm_property_blob_get(old_dm_plane_state->blend_lut);

	dm_plane_state->degamma_tf = old_dm_plane_state->degamma_tf;
	dm_plane_state->hdr_mult = old_dm_plane_state->hdr_mult;
	dm_plane_state->shaper_tf = old_dm_plane_state->shaper_tf;
	dm_plane_state->blend_tf = old_dm_plane_state->blend_tf;

	return &dm_plane_state->base;
}

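/*
 * Report whether a format/modifier pair can be scanned out. LINEAR and
 * INVALID are always accepted; otherwise the modifier must be in the plane's
 * list and, before GFX12, must satisfy the micro-tile and DCC restrictions
 * checked below.
 */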
1487static bool amdgpu_dm_plane_format_mod_supported(struct drm_plane *plane,
1488 uint32_t format,
1489 uint64_t modifier)
1490{
1491 struct amdgpu_device *adev = drm_to_adev(plane->dev);
1492 const struct drm_format_info *info = drm_format_info(format);
1493 int i;
1494
1495 if (!info)
1496 return false;
1497
1498 /*
1499 * We always have to allow these modifiers:
1500 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
1501 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
1502 */
1503 if (modifier == DRM_FORMAT_MOD_LINEAR ||
1504 modifier == DRM_FORMAT_MOD_INVALID) {
1505 return true;
1506 }
1507
1508 /* Check that the modifier is on the list of the plane's supported modifiers. */
1509 for (i = 0; i < plane->modifier_count; i++) {
1510 if (modifier == plane->modifiers[i])
1511 break;
1512 }
1513 if (i == plane->modifier_count)
1514 return false;
1515
1516 /* GFX12 doesn't have these limitations. */
1517 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) <= AMD_FMT_MOD_TILE_VER_GFX11) {
1518 enum dm_micro_swizzle microtile = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier) & 3;
1519
1520 /*
1521 * For D swizzle the canonical modifier depends on the bpp, so check
1522 * it here.
1523 */
1524 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
1525 adev->family >= AMDGPU_FAMILY_NV) {
1526 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
1527 return false;
1528 }
1529
1530 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
1531 info->cpp[0] < 8)
1532 return false;
1533
1534 if (amdgpu_dm_plane_modifier_has_dcc(modifier)) {
1535 /* Per radeonsi comments 16/64 bpp are more complicated. */
1536 if (info->cpp[0] != 4)
1537 return false;
1538 /* We support multi-planar formats, but not when combined with
1539 * additional DCC metadata planes.
1540 */
1541 if (info->num_planes > 1)
1542 return false;
1543 }
1544 }
1545
1546 return true;
1547}
1548
1549static void amdgpu_dm_plane_drm_plane_destroy_state(struct drm_plane *plane,
1550 struct drm_plane_state *state)
1551{
1552 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
1553
1554 if (dm_plane_state->degamma_lut)
1555 drm_property_blob_put(dm_plane_state->degamma_lut);
1556 if (dm_plane_state->ctm)
1557 drm_property_blob_put(dm_plane_state->ctm);
1558 if (dm_plane_state->lut3d)
1559 drm_property_blob_put(dm_plane_state->lut3d);
1560 if (dm_plane_state->shaper_lut)
1561 drm_property_blob_put(dm_plane_state->shaper_lut);
1562 if (dm_plane_state->blend_lut)
1563 drm_property_blob_put(dm_plane_state->blend_lut);
1564
1565 if (dm_plane_state->dc_state)
1566 dc_plane_state_release(dm_plane_state->dc_state);
1567
1568 drm_atomic_helper_plane_destroy_state(plane, state);
1569}
1570
1571#ifdef AMD_PRIVATE_COLOR
static void
dm_atomic_plane_attach_color_mgmt_properties(struct amdgpu_display_manager *dm,
					     struct drm_plane *plane)
{
	struct amdgpu_mode_info mode_info = dm->adev->mode_info;
	struct dpp_color_caps dpp_color_caps = dm->dc->caps.color.dpp;

	/* Check HW color pipeline capabilities on DPP block (pre-blending)
	 * before exposing related properties.
	 */
	if (dpp_color_caps.dgam_ram || dpp_color_caps.gamma_corr) {
		drm_object_attach_property(&plane->base,
					   mode_info.plane_degamma_lut_property,
					   0);
		drm_object_attach_property(&plane->base,
					   mode_info.plane_degamma_lut_size_property,
					   MAX_COLOR_LUT_ENTRIES);
		drm_object_attach_property(&plane->base,
					   dm->adev->mode_info.plane_degamma_tf_property,
					   AMDGPU_TRANSFER_FUNCTION_DEFAULT);
	}
	/* HDR MULT is always available */
	drm_object_attach_property(&plane->base,
				   dm->adev->mode_info.plane_hdr_mult_property,
				   AMDGPU_HDR_MULT_DEFAULT);

	/* Only enable plane CTM if both DPP and MPC gamut remap are available. */
	if (dm->dc->caps.color.mpc.gamut_remap)
		drm_object_attach_property(&plane->base,
					   dm->adev->mode_info.plane_ctm_property, 0);

	if (dpp_color_caps.hw_3d_lut) {
		drm_object_attach_property(&plane->base,
					   mode_info.plane_shaper_lut_property, 0);
		drm_object_attach_property(&plane->base,
					   mode_info.plane_shaper_lut_size_property,
					   MAX_COLOR_LUT_ENTRIES);
		drm_object_attach_property(&plane->base,
					   mode_info.plane_shaper_tf_property,
					   AMDGPU_TRANSFER_FUNCTION_DEFAULT);
		drm_object_attach_property(&plane->base,
					   mode_info.plane_lut3d_property, 0);
		drm_object_attach_property(&plane->base,
					   mode_info.plane_lut3d_size_property,
					   MAX_COLOR_3DLUT_SIZE);
	}

	if (dpp_color_caps.ogam_ram) {
		drm_object_attach_property(&plane->base,
					   mode_info.plane_blend_lut_property, 0);
		drm_object_attach_property(&plane->base,
					   mode_info.plane_blend_lut_size_property,
					   MAX_COLOR_LUT_ENTRIES);
		drm_object_attach_property(&plane->base,
					   mode_info.plane_blend_tf_property,
					   AMDGPU_TRANSFER_FUNCTION_DEFAULT);
	}
}

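/**
 * dm_atomic_plane_set_property - set an AMD private color plane property
 * @plane: DRM plane the property belongs to
 * @state: plane state being built up in the atomic commit
 * @property: property to update
 * @val: new property value (a blob ID for the LUT/CTM properties)
 *
 * Blob-backed properties are swapped in with
 * drm_property_replace_blob_from_id(); scalar properties are stored
 * directly. Any actual change marks the state as color_mgmt_changed so the
 * color pipeline is reprogrammed on commit.
 *
 * Returns 0 on success or a negative errno (e.g. -EINVAL for an unknown
 * property or an invalid blob).
 */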
static int
dm_atomic_plane_set_property(struct drm_plane *plane,
			     struct drm_plane_state *state,
			     struct drm_property *property,
			     uint64_t val)
{
	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	bool replaced = false;
	int ret;

	if (property == adev->mode_info.plane_degamma_lut_property) {
		ret = drm_property_replace_blob_from_id(plane->dev,
							&dm_plane_state->degamma_lut,
							val, -1,
							sizeof(struct drm_color_lut),
							&replaced);
		dm_plane_state->base.color_mgmt_changed |= replaced;
		return ret;
	} else if (property == adev->mode_info.plane_degamma_tf_property) {
		if (dm_plane_state->degamma_tf != val) {
			dm_plane_state->degamma_tf = val;
			dm_plane_state->base.color_mgmt_changed = 1;
		}
	} else if (property == adev->mode_info.plane_hdr_mult_property) {
		if (dm_plane_state->hdr_mult != val) {
			dm_plane_state->hdr_mult = val;
			dm_plane_state->base.color_mgmt_changed = 1;
		}
	} else if (property == adev->mode_info.plane_ctm_property) {
		ret = drm_property_replace_blob_from_id(plane->dev,
							&dm_plane_state->ctm,
							val,
							sizeof(struct drm_color_ctm_3x4), -1,
							&replaced);
		dm_plane_state->base.color_mgmt_changed |= replaced;
		return ret;
	} else if (property == adev->mode_info.plane_shaper_lut_property) {
		ret = drm_property_replace_blob_from_id(plane->dev,
							&dm_plane_state->shaper_lut,
							val, -1,
							sizeof(struct drm_color_lut),
							&replaced);
		dm_plane_state->base.color_mgmt_changed |= replaced;
		return ret;
	} else if (property == adev->mode_info.plane_shaper_tf_property) {
		if (dm_plane_state->shaper_tf != val) {
			dm_plane_state->shaper_tf = val;
			dm_plane_state->base.color_mgmt_changed = 1;
		}
	} else if (property == adev->mode_info.plane_lut3d_property) {
		ret = drm_property_replace_blob_from_id(plane->dev,
							&dm_plane_state->lut3d,
							val, -1,
							sizeof(struct drm_color_lut),
							&replaced);
		dm_plane_state->base.color_mgmt_changed |= replaced;
		return ret;
	} else if (property == adev->mode_info.plane_blend_lut_property) {
		ret = drm_property_replace_blob_from_id(plane->dev,
							&dm_plane_state->blend_lut,
							val, -1,
							sizeof(struct drm_color_lut),
							&replaced);
		dm_plane_state->base.color_mgmt_changed |= replaced;
		return ret;
	} else if (property == adev->mode_info.plane_blend_tf_property) {
		if (dm_plane_state->blend_tf != val) {
			dm_plane_state->blend_tf = val;
			dm_plane_state->base.color_mgmt_changed = 1;
		}
	} else {
		drm_dbg_atomic(plane->dev,
			       "[PLANE:%d:%s] unknown property [PROP:%d:%s]\n",
			       plane->base.id, plane->name,
			       property->base.id, property->name);
		return -EINVAL;
	}

	return 0;
}

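/**
 * dm_atomic_plane_get_property - read back an AMD private color plane property
 * @plane: DRM plane the property belongs to
 * @state: plane state to read from
 * @property: property to look up
 * @val: returned value (a blob ID, or 0 when unset, for LUT/CTM properties)
 *
 * Returns 0 on success, -EINVAL for unknown properties.
 */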
static int
dm_atomic_plane_get_property(struct drm_plane *plane,
			     const struct drm_plane_state *state,
			     struct drm_property *property,
			     uint64_t *val)
{
	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
	struct amdgpu_device *adev = drm_to_adev(plane->dev);

	if (property == adev->mode_info.plane_degamma_lut_property) {
		*val = (dm_plane_state->degamma_lut) ?
			dm_plane_state->degamma_lut->base.id : 0;
	} else if (property == adev->mode_info.plane_degamma_tf_property) {
		*val = dm_plane_state->degamma_tf;
	} else if (property == adev->mode_info.plane_hdr_mult_property) {
		*val = dm_plane_state->hdr_mult;
	} else if (property == adev->mode_info.plane_ctm_property) {
		*val = (dm_plane_state->ctm) ?
			dm_plane_state->ctm->base.id : 0;
	} else if (property == adev->mode_info.plane_shaper_lut_property) {
		*val = (dm_plane_state->shaper_lut) ?
			dm_plane_state->shaper_lut->base.id : 0;
	} else if (property == adev->mode_info.plane_shaper_tf_property) {
		*val = dm_plane_state->shaper_tf;
	} else if (property == adev->mode_info.plane_lut3d_property) {
		*val = (dm_plane_state->lut3d) ?
			dm_plane_state->lut3d->base.id : 0;
	} else if (property == adev->mode_info.plane_blend_lut_property) {
		*val = (dm_plane_state->blend_lut) ?
			dm_plane_state->blend_lut->base.id : 0;
	} else if (property == adev->mode_info.plane_blend_tf_property) {
		*val = dm_plane_state->blend_tf;
	} else {
		return -EINVAL;
	}

	return 0;
}
#endif

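/* Plane callbacks shared by all DM plane types; plane updates go through
 * the DRM atomic helpers, state and modifier handling is DM-specific.
 */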
static const struct drm_plane_funcs dm_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = drm_plane_helper_destroy,
	.reset = amdgpu_dm_plane_drm_plane_reset,
	.atomic_duplicate_state = amdgpu_dm_plane_drm_plane_duplicate_state,
	.atomic_destroy_state = amdgpu_dm_plane_drm_plane_destroy_state,
	.format_mod_supported = amdgpu_dm_plane_format_mod_supported,
#ifdef AMD_PRIVATE_COLOR
	.atomic_set_property = dm_atomic_plane_set_property,
	.atomic_get_property = dm_atomic_plane_get_property,
#endif
};

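/**
 * amdgpu_dm_plane_init - create and initialize a DM-backed DRM plane
 * @dm: display manager owning the plane
 * @plane: pre-allocated plane to initialize
 * @possible_crtcs: bitmask of CRTCs the plane can be bound to
 * @plane_cap: DC capabilities of the plane, may be NULL
 *
 * Registers @plane with core DRM along with its formats and modifiers, then
 * attaches the standard plane properties (alpha/blend mode, zpos, YUV color
 * encoding/range, rotation, damage clips) that the hardware capabilities
 * allow for the given plane type.
 *
 * Returns 0 on success, negative errno on failure.
 */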
int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
			 struct drm_plane *plane,
			 unsigned long possible_crtcs,
			 const struct dc_plane_cap *plane_cap)
{
	uint32_t formats[32];
	int num_formats;
	int res = -EPERM;
	unsigned int supported_rotations;
	uint64_t *modifiers = NULL;
	unsigned int primary_zpos = dm->dc->caps.max_slave_planes;

	num_formats = amdgpu_dm_plane_get_plane_formats(plane, plane_cap, formats,
							ARRAY_SIZE(formats));

	res = amdgpu_dm_plane_get_plane_modifiers(dm->adev, plane->type, &modifiers);
	if (res)
		return res;

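	/* No modifier list advertised: fall back to legacy (non-modifier)
	 * framebuffer behavior for the whole device.
	 */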
	if (modifiers == NULL)
		adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;

	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
				       &dm_plane_funcs, formats, num_formats,
				       modifiers, plane->type, NULL);
	kfree(modifiers);
	if (res)
		return res;

	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
	    plane_cap && plane_cap->per_pixel_alpha) {
		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
					  BIT(DRM_MODE_BLEND_PREMULTI) |
					  BIT(DRM_MODE_BLEND_COVERAGE);

		drm_plane_create_alpha_property(plane);
		drm_plane_create_blend_mode_property(plane, blend_caps);
	}

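	/*
	 * Resulting zpos layering, e.g. with dc->caps.max_slave_planes == 2:
	 * the two overlays start above the primary but are mutable within
	 * [0, 254], the primary is pinned at zpos 2 so both overlays fit
	 * below it as underlays, and the cursor is pinned on top at 255.
	 */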
	if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
		/*
		 * Allow OVERLAY planes to be used as underlays by assigning an
		 * immutable zpos = # of OVERLAY planes to the PRIMARY plane.
		 */
		drm_plane_create_zpos_immutable_property(plane, primary_zpos);
	} else if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
		/*
		 * OVERLAY planes can be below or above the PRIMARY, but cannot
		 * be above the CURSOR plane.
		 */
		unsigned int zpos = primary_zpos + 1 + drm_plane_index(plane);

		drm_plane_create_zpos_property(plane, zpos, 0, 254);
	} else if (plane->type == DRM_PLANE_TYPE_CURSOR) {
		drm_plane_create_zpos_immutable_property(plane, 255);
	}

	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
	    plane_cap &&
	    (plane_cap->pixel_format_support.nv12 ||
	     plane_cap->pixel_format_support.p010)) {
		/* This only affects YUV formats. */
		drm_plane_create_color_properties(
			plane,
			BIT(DRM_COLOR_YCBCR_BT601) |
			BIT(DRM_COLOR_YCBCR_BT709) |
			BIT(DRM_COLOR_YCBCR_BT2020),
			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
	}

	supported_rotations =
		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;

	if (dm->adev->asic_type >= CHIP_BONAIRE &&
	    plane->type != DRM_PLANE_TYPE_CURSOR)
		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
						   supported_rotations);

	if (amdgpu_ip_version(dm->adev, DCE_HWIP, 0) > IP_VERSION(3, 0, 1) &&
	    plane->type != DRM_PLANE_TYPE_CURSOR)
		drm_plane_enable_fb_damage_clips(plane);

	drm_plane_helper_add(plane, &dm_plane_helper_funcs);

#ifdef AMD_PRIVATE_COLOR
	dm_atomic_plane_attach_color_mgmt_properties(dm, plane);
#endif
	/* Create (reset) the plane state */
	if (plane->funcs->reset)
		plane->funcs->reset(plane);

	return 0;
}

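/**
 * amdgpu_dm_plane_is_video_format - check whether a fourcc is a YUV video format
 * @format: DRM fourcc code (DRM_FORMAT_*)
 *
 * Returns true if @format is one of the video formats handled here
 * (NV12, NV21 or P010), false otherwise.
 */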
bool amdgpu_dm_plane_is_video_format(uint32_t format)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(video_formats); i++)
		if (format == video_formats[i])
			return true;

	return false;
}
