Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge commit 'refs/for-upstream/mali-dp' of git://linux-arm.org/linux-ld into drm-next

This pull request adds initial Mali D71 support to the Arm "komeda" DRM
driver. The code was reviewed at the end of last year; I have just been
too slow in pushing it into mainline. Since it started baking in
linux-next, we had one issue raised by the kbuild bot and one from Joe
Perches on the MAINTAINERS entry, for which I'm including fixes here.

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Liviu Dudau <Liviu.Dudau@arm.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190401192833.GW21747@e110455-lin.cambridge.arm.com

+1986 -34
+1 -1
MAINTAINERS
··· 1167 1167 T: git git://linux-arm.org/linux-ld.git for-upstream/mali-dp 1168 1168 F: drivers/gpu/drm/arm/display/include/ 1169 1169 F: drivers/gpu/drm/arm/display/komeda/ 1170 - F: Documentation/devicetree/bindings/display/arm/arm,komeda.txt 1170 + F: Documentation/devicetree/bindings/display/arm,komeda.txt 1171 1171 F: Documentation/gpu/komeda-kms.rst 1172 1172 1173 1173 ARM MALI-DP DRM DRIVER
+31
drivers/gpu/drm/arm/display/include/malidp_utils.h
··· 7 7 #ifndef _MALIDP_UTILS_ 8 8 #define _MALIDP_UTILS_ 9 9 10 + #include <linux/delay.h> 11 + 10 12 #define has_bit(nr, mask) (BIT(nr) & (mask)) 11 13 #define has_bits(bits, mask) (((bits) & (mask)) == (bits)) 12 14 13 15 #define dp_for_each_set_bit(bit, mask) \ 14 16 for_each_set_bit((bit), ((unsigned long *)&(mask)), sizeof(mask) * 8) 17 + 18 + #define dp_wait_cond(__cond, __tries, __min_range, __max_range) \ 19 + ({ \ 20 + int num_tries = __tries; \ 21 + while (!__cond && (num_tries > 0)) { \ 22 + usleep_range(__min_range, __max_range); \ 23 + if (__cond) \ 24 + break; \ 25 + num_tries--; \ 26 + } \ 27 + num_tries; \ 28 + }) 29 + 30 + /* the restriction of range is [start, end] */ 31 + struct malidp_range { 32 + u32 start; 33 + u32 end; 34 + }; 35 + 36 + static inline void set_range(struct malidp_range *rg, u32 start, u32 end) 37 + { 38 + rg->start = start; 39 + rg->end = end; 40 + } 41 + 42 + static inline bool in_range(struct malidp_range *rg, u32 v) 43 + { 44 + return (v >= rg->start) && (v <= rg->end); 45 + } 15 46 16 47 #endif /* _MALIDP_UTILS_ */
+2 -1
drivers/gpu/drm/arm/display/komeda/Makefile
··· 16 16 komeda_private_obj.o 17 17 18 18 komeda-y += \ 19 - d71/d71_dev.o 19 + d71/d71_dev.o \ 20 + d71/d71_component.o 20 21 21 22 obj-$(CONFIG_DRM_KOMEDA) += komeda.o
+684
drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. 4 + * Author: James.Qian.Wang <james.qian.wang@arm.com> 5 + * 6 + */ 7 + 8 + #include <drm/drm_print.h> 9 + #include "d71_dev.h" 10 + #include "komeda_kms.h" 11 + #include "malidp_io.h" 12 + #include "komeda_framebuffer.h" 13 + 14 + static void get_resources_id(u32 hw_id, u32 *pipe_id, u32 *comp_id) 15 + { 16 + u32 id = BLOCK_INFO_BLK_ID(hw_id); 17 + u32 pipe = id; 18 + 19 + switch (BLOCK_INFO_BLK_TYPE(hw_id)) { 20 + case D71_BLK_TYPE_LPU_WB_LAYER: 21 + id = KOMEDA_COMPONENT_WB_LAYER; 22 + break; 23 + case D71_BLK_TYPE_CU_SPLITTER: 24 + id = KOMEDA_COMPONENT_SPLITTER; 25 + break; 26 + case D71_BLK_TYPE_CU_SCALER: 27 + pipe = id / D71_PIPELINE_MAX_SCALERS; 28 + id %= D71_PIPELINE_MAX_SCALERS; 29 + id += KOMEDA_COMPONENT_SCALER0; 30 + break; 31 + case D71_BLK_TYPE_CU: 32 + id += KOMEDA_COMPONENT_COMPIZ0; 33 + break; 34 + case D71_BLK_TYPE_LPU_LAYER: 35 + pipe = id / D71_PIPELINE_MAX_LAYERS; 36 + id %= D71_PIPELINE_MAX_LAYERS; 37 + id += KOMEDA_COMPONENT_LAYER0; 38 + break; 39 + case D71_BLK_TYPE_DOU_IPS: 40 + id += KOMEDA_COMPONENT_IPS0; 41 + break; 42 + case D71_BLK_TYPE_CU_MERGER: 43 + id = KOMEDA_COMPONENT_MERGER; 44 + break; 45 + case D71_BLK_TYPE_DOU: 46 + id = KOMEDA_COMPONENT_TIMING_CTRLR; 47 + break; 48 + default: 49 + id = 0xFFFFFFFF; 50 + } 51 + 52 + if (comp_id) 53 + *comp_id = id; 54 + 55 + if (pipe_id) 56 + *pipe_id = pipe; 57 + } 58 + 59 + static u32 get_valid_inputs(struct block_header *blk) 60 + { 61 + u32 valid_inputs = 0, comp_id; 62 + int i; 63 + 64 + for (i = 0; i < PIPELINE_INFO_N_VALID_INPUTS(blk->pipeline_info); i++) { 65 + get_resources_id(blk->input_ids[i], NULL, &comp_id); 66 + if (comp_id == 0xFFFFFFFF) 67 + continue; 68 + valid_inputs |= BIT(comp_id); 69 + } 70 + 71 + return valid_inputs; 72 + } 73 + 74 + static void get_values_from_reg(void __iomem *reg, u32 offset, 75 + u32 count, u32 *val) 76 + { 77 + u32 i, addr; 78 + 79 + for 
(i = 0; i < count; i++) { 80 + addr = offset + (i << 2); 81 + /* 0xA4 is WO register */ 82 + if (addr != 0xA4) 83 + val[i] = malidp_read32(reg, addr); 84 + else 85 + val[i] = 0xDEADDEAD; 86 + } 87 + } 88 + 89 + static void dump_block_header(struct seq_file *sf, void __iomem *reg) 90 + { 91 + struct block_header hdr; 92 + u32 i, n_input, n_output; 93 + 94 + d71_read_block_header(reg, &hdr); 95 + seq_printf(sf, "BLOCK_INFO:\t\t0x%X\n", hdr.block_info); 96 + seq_printf(sf, "PIPELINE_INFO:\t\t0x%X\n", hdr.pipeline_info); 97 + 98 + n_output = PIPELINE_INFO_N_OUTPUTS(hdr.pipeline_info); 99 + n_input = PIPELINE_INFO_N_VALID_INPUTS(hdr.pipeline_info); 100 + 101 + for (i = 0; i < n_input; i++) 102 + seq_printf(sf, "VALID_INPUT_ID%u:\t0x%X\n", 103 + i, hdr.input_ids[i]); 104 + 105 + for (i = 0; i < n_output; i++) 106 + seq_printf(sf, "OUTPUT_ID%u:\t\t0x%X\n", 107 + i, hdr.output_ids[i]); 108 + } 109 + 110 + static u32 to_rot_ctrl(u32 rot) 111 + { 112 + u32 lr_ctrl = 0; 113 + 114 + switch (rot & DRM_MODE_ROTATE_MASK) { 115 + case DRM_MODE_ROTATE_0: 116 + lr_ctrl |= L_ROT(L_ROT_R0); 117 + break; 118 + case DRM_MODE_ROTATE_90: 119 + lr_ctrl |= L_ROT(L_ROT_R90); 120 + break; 121 + case DRM_MODE_ROTATE_180: 122 + lr_ctrl |= L_ROT(L_ROT_R180); 123 + break; 124 + case DRM_MODE_ROTATE_270: 125 + lr_ctrl |= L_ROT(L_ROT_R270); 126 + break; 127 + } 128 + 129 + if (rot & DRM_MODE_REFLECT_X) 130 + lr_ctrl |= L_HFLIP; 131 + if (rot & DRM_MODE_REFLECT_Y) 132 + lr_ctrl |= L_VFLIP; 133 + 134 + return lr_ctrl; 135 + } 136 + 137 + static inline u32 to_d71_input_id(struct komeda_component_output *output) 138 + { 139 + struct komeda_component *comp = output->component; 140 + 141 + return comp ? 
(comp->hw_id + output->output_port) : 0; 142 + } 143 + 144 + static void d71_layer_disable(struct komeda_component *c) 145 + { 146 + malidp_write32_mask(c->reg, BLK_CONTROL, L_EN, 0); 147 + } 148 + 149 + static void d71_layer_update(struct komeda_component *c, 150 + struct komeda_component_state *state) 151 + { 152 + struct komeda_layer_state *st = to_layer_st(state); 153 + struct drm_plane_state *plane_st = state->plane->state; 154 + struct drm_framebuffer *fb = plane_st->fb; 155 + struct komeda_fb *kfb = to_kfb(fb); 156 + u32 __iomem *reg = c->reg; 157 + u32 ctrl_mask = L_EN | L_ROT(L_ROT_R270) | L_HFLIP | L_VFLIP | L_TBU_EN; 158 + u32 ctrl = L_EN | to_rot_ctrl(st->rot); 159 + int i; 160 + 161 + for (i = 0; i < fb->format->num_planes; i++) { 162 + malidp_write32(reg, 163 + BLK_P0_PTR_LOW + i * LAYER_PER_PLANE_REGS * 4, 164 + lower_32_bits(st->addr[i])); 165 + malidp_write32(reg, 166 + BLK_P0_PTR_HIGH + i * LAYER_PER_PLANE_REGS * 4, 167 + upper_32_bits(st->addr[i])); 168 + if (i >= 2) 169 + break; 170 + 171 + malidp_write32(reg, 172 + BLK_P0_STRIDE + i * LAYER_PER_PLANE_REGS * 4, 173 + fb->pitches[i] & 0xFFFF); 174 + } 175 + 176 + malidp_write32(reg, LAYER_FMT, kfb->format_caps->hw_id); 177 + malidp_write32(reg, BLK_IN_SIZE, HV_SIZE(st->hsize, st->vsize)); 178 + 179 + malidp_write32_mask(reg, BLK_CONTROL, ctrl_mask, ctrl); 180 + } 181 + 182 + static void d71_layer_dump(struct komeda_component *c, struct seq_file *sf) 183 + { 184 + u32 v[15], i; 185 + bool rich, rgb2rgb; 186 + char *prefix; 187 + 188 + get_values_from_reg(c->reg, LAYER_INFO, 1, &v[14]); 189 + if (v[14] & 0x1) { 190 + rich = true; 191 + prefix = "LR_"; 192 + } else { 193 + rich = false; 194 + prefix = "LS_"; 195 + } 196 + 197 + rgb2rgb = !!(v[14] & L_INFO_CM); 198 + 199 + dump_block_header(sf, c->reg); 200 + 201 + seq_printf(sf, "%sLAYER_INFO:\t\t0x%X\n", prefix, v[14]); 202 + 203 + get_values_from_reg(c->reg, 0xD0, 1, v); 204 + seq_printf(sf, "%sCONTROL:\t\t0x%X\n", prefix, v[0]); 205 + if (rich) { 
206 + get_values_from_reg(c->reg, 0xD4, 1, v); 207 + seq_printf(sf, "LR_RICH_CONTROL:\t0x%X\n", v[0]); 208 + } 209 + get_values_from_reg(c->reg, 0xD8, 4, v); 210 + seq_printf(sf, "%sFORMAT:\t\t0x%X\n", prefix, v[0]); 211 + seq_printf(sf, "%sIT_COEFFTAB:\t\t0x%X\n", prefix, v[1]); 212 + seq_printf(sf, "%sIN_SIZE:\t\t0x%X\n", prefix, v[2]); 213 + seq_printf(sf, "%sPALPHA:\t\t0x%X\n", prefix, v[3]); 214 + 215 + get_values_from_reg(c->reg, 0x100, 3, v); 216 + seq_printf(sf, "%sP0_PTR_LOW:\t\t0x%X\n", prefix, v[0]); 217 + seq_printf(sf, "%sP0_PTR_HIGH:\t\t0x%X\n", prefix, v[1]); 218 + seq_printf(sf, "%sP0_STRIDE:\t\t0x%X\n", prefix, v[2]); 219 + 220 + get_values_from_reg(c->reg, 0x110, 2, v); 221 + seq_printf(sf, "%sP1_PTR_LOW:\t\t0x%X\n", prefix, v[0]); 222 + seq_printf(sf, "%sP1_PTR_HIGH:\t\t0x%X\n", prefix, v[1]); 223 + if (rich) { 224 + get_values_from_reg(c->reg, 0x118, 1, v); 225 + seq_printf(sf, "LR_P1_STRIDE:\t\t0x%X\n", v[0]); 226 + 227 + get_values_from_reg(c->reg, 0x120, 2, v); 228 + seq_printf(sf, "LR_P2_PTR_LOW:\t\t0x%X\n", v[0]); 229 + seq_printf(sf, "LR_P2_PTR_HIGH:\t\t0x%X\n", v[1]); 230 + 231 + get_values_from_reg(c->reg, 0x130, 12, v); 232 + for (i = 0; i < 12; i++) 233 + seq_printf(sf, "LR_YUV_RGB_COEFF%u:\t0x%X\n", i, v[i]); 234 + } 235 + 236 + if (rgb2rgb) { 237 + get_values_from_reg(c->reg, LAYER_RGB_RGB_COEFF0, 12, v); 238 + for (i = 0; i < 12; i++) 239 + seq_printf(sf, "LS_RGB_RGB_COEFF%u:\t0x%X\n", i, v[i]); 240 + } 241 + 242 + get_values_from_reg(c->reg, 0x160, 3, v); 243 + seq_printf(sf, "%sAD_CONTROL:\t\t0x%X\n", prefix, v[0]); 244 + seq_printf(sf, "%sAD_H_CROP:\t\t0x%X\n", prefix, v[1]); 245 + seq_printf(sf, "%sAD_V_CROP:\t\t0x%X\n", prefix, v[2]); 246 + } 247 + 248 + static struct komeda_component_funcs d71_layer_funcs = { 249 + .update = d71_layer_update, 250 + .disable = d71_layer_disable, 251 + .dump_register = d71_layer_dump, 252 + }; 253 + 254 + static int d71_layer_init(struct d71_dev *d71, 255 + struct block_header *blk, u32 __iomem 
*reg) 256 + { 257 + struct komeda_component *c; 258 + struct komeda_layer *layer; 259 + u32 pipe_id, layer_id, layer_info; 260 + 261 + get_resources_id(blk->block_info, &pipe_id, &layer_id); 262 + c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*layer), 263 + layer_id, 264 + BLOCK_INFO_INPUT_ID(blk->block_info), 265 + &d71_layer_funcs, 0, 266 + get_valid_inputs(blk), 267 + 1, reg, "LPU%d_LAYER%d", pipe_id, layer_id); 268 + if (IS_ERR(c)) { 269 + DRM_ERROR("Failed to add layer component\n"); 270 + return PTR_ERR(c); 271 + } 272 + 273 + layer = to_layer(c); 274 + layer_info = malidp_read32(reg, LAYER_INFO); 275 + 276 + if (layer_info & L_INFO_RF) 277 + layer->layer_type = KOMEDA_FMT_RICH_LAYER; 278 + else 279 + layer->layer_type = KOMEDA_FMT_SIMPLE_LAYER; 280 + 281 + set_range(&layer->hsize_in, 4, d71->max_line_size); 282 + set_range(&layer->vsize_in, 4, d71->max_vsize); 283 + 284 + malidp_write32(reg, LAYER_PALPHA, D71_PALPHA_DEF_MAP); 285 + 286 + layer->supported_rots = DRM_MODE_ROTATE_MASK | DRM_MODE_REFLECT_MASK; 287 + 288 + return 0; 289 + } 290 + 291 + static int d71_wb_layer_init(struct d71_dev *d71, 292 + struct block_header *blk, u32 __iomem *reg) 293 + { 294 + DRM_DEBUG("Detect D71_Wb_Layer.\n"); 295 + 296 + return 0; 297 + } 298 + 299 + static void d71_component_disable(struct komeda_component *c) 300 + { 301 + u32 __iomem *reg = c->reg; 302 + u32 i; 303 + 304 + malidp_write32(reg, BLK_CONTROL, 0); 305 + 306 + for (i = 0; i < c->max_active_inputs; i++) 307 + malidp_write32(reg, BLK_INPUT_ID0 + (i << 2), 0); 308 + } 309 + 310 + static void compiz_enable_input(u32 __iomem *id_reg, 311 + u32 __iomem *cfg_reg, 312 + u32 input_hw_id, 313 + struct komeda_compiz_input_cfg *cin) 314 + { 315 + u32 ctrl = CU_INPUT_CTRL_EN; 316 + u8 blend = cin->pixel_blend_mode; 317 + 318 + if (blend == DRM_MODE_BLEND_PIXEL_NONE) 319 + ctrl |= CU_INPUT_CTRL_PAD; 320 + else if (blend == DRM_MODE_BLEND_PREMULTI) 321 + ctrl |= CU_INPUT_CTRL_PMUL; 322 + 323 + ctrl |= 
CU_INPUT_CTRL_ALPHA(cin->layer_alpha); 324 + 325 + malidp_write32(id_reg, BLK_INPUT_ID0, input_hw_id); 326 + 327 + malidp_write32(cfg_reg, CU_INPUT0_SIZE, 328 + HV_SIZE(cin->hsize, cin->vsize)); 329 + malidp_write32(cfg_reg, CU_INPUT0_OFFSET, 330 + HV_OFFSET(cin->hoffset, cin->voffset)); 331 + malidp_write32(cfg_reg, CU_INPUT0_CONTROL, ctrl); 332 + } 333 + 334 + static void d71_compiz_update(struct komeda_component *c, 335 + struct komeda_component_state *state) 336 + { 337 + struct komeda_compiz_state *st = to_compiz_st(state); 338 + u32 __iomem *reg = c->reg; 339 + u32 __iomem *id_reg, *cfg_reg; 340 + u32 index, input_hw_id; 341 + 342 + for_each_changed_input(state, index) { 343 + id_reg = reg + index; 344 + cfg_reg = reg + index * CU_PER_INPUT_REGS; 345 + input_hw_id = to_d71_input_id(&state->inputs[index]); 346 + if (state->active_inputs & BIT(index)) { 347 + compiz_enable_input(id_reg, cfg_reg, 348 + input_hw_id, &st->cins[index]); 349 + } else { 350 + malidp_write32(id_reg, BLK_INPUT_ID0, 0); 351 + malidp_write32(cfg_reg, CU_INPUT0_CONTROL, 0); 352 + } 353 + } 354 + 355 + malidp_write32(reg, BLK_SIZE, HV_SIZE(st->hsize, st->vsize)); 356 + } 357 + 358 + static void d71_compiz_dump(struct komeda_component *c, struct seq_file *sf) 359 + { 360 + u32 v[8], i; 361 + 362 + dump_block_header(sf, c->reg); 363 + 364 + get_values_from_reg(c->reg, 0x80, 5, v); 365 + for (i = 0; i < 5; i++) 366 + seq_printf(sf, "CU_INPUT_ID%u:\t\t0x%X\n", i, v[i]); 367 + 368 + get_values_from_reg(c->reg, 0xA0, 5, v); 369 + seq_printf(sf, "CU_IRQ_RAW_STATUS:\t0x%X\n", v[0]); 370 + seq_printf(sf, "CU_IRQ_CLEAR:\t\t0x%X\n", v[1]); 371 + seq_printf(sf, "CU_IRQ_MASK:\t\t0x%X\n", v[2]); 372 + seq_printf(sf, "CU_IRQ_STATUS:\t\t0x%X\n", v[3]); 373 + seq_printf(sf, "CU_STATUS:\t\t0x%X\n", v[4]); 374 + 375 + get_values_from_reg(c->reg, 0xD0, 2, v); 376 + seq_printf(sf, "CU_CONTROL:\t\t0x%X\n", v[0]); 377 + seq_printf(sf, "CU_SIZE:\t\t0x%X\n", v[1]); 378 + 379 + get_values_from_reg(c->reg, 0xDC, 1, 
v); 380 + seq_printf(sf, "CU_BG_COLOR:\t\t0x%X\n", v[0]); 381 + 382 + for (i = 0, v[4] = 0xE0; i < 5; i++, v[4] += 0x10) { 383 + get_values_from_reg(c->reg, v[4], 3, v); 384 + seq_printf(sf, "CU_INPUT%u_SIZE:\t\t0x%X\n", i, v[0]); 385 + seq_printf(sf, "CU_INPUT%u_OFFSET:\t0x%X\n", i, v[1]); 386 + seq_printf(sf, "CU_INPUT%u_CONTROL:\t0x%X\n", i, v[2]); 387 + } 388 + 389 + get_values_from_reg(c->reg, 0x130, 2, v); 390 + seq_printf(sf, "CU_USER_LOW:\t\t0x%X\n", v[0]); 391 + seq_printf(sf, "CU_USER_HIGH:\t\t0x%X\n", v[1]); 392 + } 393 + 394 + struct komeda_component_funcs d71_compiz_funcs = { 395 + .update = d71_compiz_update, 396 + .disable = d71_component_disable, 397 + .dump_register = d71_compiz_dump, 398 + }; 399 + 400 + static int d71_compiz_init(struct d71_dev *d71, 401 + struct block_header *blk, u32 __iomem *reg) 402 + { 403 + struct komeda_component *c; 404 + struct komeda_compiz *compiz; 405 + u32 pipe_id, comp_id; 406 + 407 + get_resources_id(blk->block_info, &pipe_id, &comp_id); 408 + 409 + c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*compiz), 410 + comp_id, 411 + BLOCK_INFO_INPUT_ID(blk->block_info), 412 + &d71_compiz_funcs, 413 + CU_NUM_INPUT_IDS, get_valid_inputs(blk), 414 + CU_NUM_OUTPUT_IDS, reg, 415 + "CU%d", pipe_id); 416 + if (IS_ERR(c)) 417 + return PTR_ERR(c); 418 + 419 + compiz = to_compiz(c); 420 + 421 + set_range(&compiz->hsize, D71_MIN_LINE_SIZE, d71->max_line_size); 422 + set_range(&compiz->vsize, D71_MIN_VERTICAL_SIZE, d71->max_vsize); 423 + 424 + return 0; 425 + } 426 + 427 + static void d71_improc_update(struct komeda_component *c, 428 + struct komeda_component_state *state) 429 + { 430 + struct komeda_improc_state *st = to_improc_st(state); 431 + u32 __iomem *reg = c->reg; 432 + u32 index, input_hw_id; 433 + 434 + for_each_changed_input(state, index) { 435 + input_hw_id = state->active_inputs & BIT(index) ? 
436 + to_d71_input_id(&state->inputs[index]) : 0; 437 + malidp_write32(reg, BLK_INPUT_ID0 + index * 4, input_hw_id); 438 + } 439 + 440 + malidp_write32(reg, BLK_SIZE, HV_SIZE(st->hsize, st->vsize)); 441 + } 442 + 443 + static void d71_improc_dump(struct komeda_component *c, struct seq_file *sf) 444 + { 445 + u32 v[12], i; 446 + 447 + dump_block_header(sf, c->reg); 448 + 449 + get_values_from_reg(c->reg, 0x80, 2, v); 450 + seq_printf(sf, "IPS_INPUT_ID0:\t\t0x%X\n", v[0]); 451 + seq_printf(sf, "IPS_INPUT_ID1:\t\t0x%X\n", v[1]); 452 + 453 + get_values_from_reg(c->reg, 0xC0, 1, v); 454 + seq_printf(sf, "IPS_INFO:\t\t0x%X\n", v[0]); 455 + 456 + get_values_from_reg(c->reg, 0xD0, 3, v); 457 + seq_printf(sf, "IPS_CONTROL:\t\t0x%X\n", v[0]); 458 + seq_printf(sf, "IPS_SIZE:\t\t0x%X\n", v[1]); 459 + seq_printf(sf, "IPS_DEPTH:\t\t0x%X\n", v[2]); 460 + 461 + get_values_from_reg(c->reg, 0x130, 12, v); 462 + for (i = 0; i < 12; i++) 463 + seq_printf(sf, "IPS_RGB_RGB_COEFF%u:\t0x%X\n", i, v[i]); 464 + 465 + get_values_from_reg(c->reg, 0x170, 12, v); 466 + for (i = 0; i < 12; i++) 467 + seq_printf(sf, "IPS_RGB_YUV_COEFF%u:\t0x%X\n", i, v[i]); 468 + } 469 + 470 + struct komeda_component_funcs d71_improc_funcs = { 471 + .update = d71_improc_update, 472 + .disable = d71_component_disable, 473 + .dump_register = d71_improc_dump, 474 + }; 475 + 476 + static int d71_improc_init(struct d71_dev *d71, 477 + struct block_header *blk, u32 __iomem *reg) 478 + { 479 + struct komeda_component *c; 480 + struct komeda_improc *improc; 481 + u32 pipe_id, comp_id, value; 482 + 483 + get_resources_id(blk->block_info, &pipe_id, &comp_id); 484 + 485 + c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*improc), 486 + comp_id, 487 + BLOCK_INFO_INPUT_ID(blk->block_info), 488 + &d71_improc_funcs, IPS_NUM_INPUT_IDS, 489 + get_valid_inputs(blk), 490 + IPS_NUM_OUTPUT_IDS, reg, "DOU%d_IPS", pipe_id); 491 + if (IS_ERR(c)) { 492 + DRM_ERROR("Failed to add improc component\n"); 493 + return PTR_ERR(c); 
494 + } 495 + 496 + improc = to_improc(c); 497 + improc->supported_color_depths = BIT(8) | BIT(10); 498 + improc->supported_color_formats = DRM_COLOR_FORMAT_RGB444 | 499 + DRM_COLOR_FORMAT_YCRCB444 | 500 + DRM_COLOR_FORMAT_YCRCB422; 501 + value = malidp_read32(reg, BLK_INFO); 502 + if (value & IPS_INFO_CHD420) 503 + improc->supported_color_formats |= DRM_COLOR_FORMAT_YCRCB420; 504 + 505 + improc->supports_csc = true; 506 + improc->supports_gamma = true; 507 + 508 + return 0; 509 + } 510 + 511 + static void d71_timing_ctrlr_disable(struct komeda_component *c) 512 + { 513 + malidp_write32_mask(c->reg, BLK_CONTROL, BS_CTRL_EN, 0); 514 + } 515 + 516 + static void d71_timing_ctrlr_update(struct komeda_component *c, 517 + struct komeda_component_state *state) 518 + { 519 + struct drm_crtc_state *crtc_st = state->crtc->state; 520 + u32 __iomem *reg = c->reg; 521 + struct videomode vm; 522 + u32 value; 523 + 524 + drm_display_mode_to_videomode(&crtc_st->adjusted_mode, &vm); 525 + 526 + malidp_write32(reg, BS_ACTIVESIZE, HV_SIZE(vm.hactive, vm.vactive)); 527 + malidp_write32(reg, BS_HINTERVALS, BS_H_INTVALS(vm.hfront_porch, 528 + vm.hback_porch)); 529 + malidp_write32(reg, BS_VINTERVALS, BS_V_INTVALS(vm.vfront_porch, 530 + vm.vback_porch)); 531 + 532 + value = BS_SYNC_VSW(vm.vsync_len) | BS_SYNC_HSW(vm.hsync_len); 533 + value |= vm.flags & DISPLAY_FLAGS_VSYNC_HIGH ? BS_SYNC_VSP : 0; 534 + value |= vm.flags & DISPLAY_FLAGS_HSYNC_HIGH ? 
BS_SYNC_HSP : 0; 535 + malidp_write32(reg, BS_SYNC, value); 536 + 537 + malidp_write32(reg, BS_PROG_LINE, D71_DEFAULT_PREPRETCH_LINE - 1); 538 + malidp_write32(reg, BS_PREFETCH_LINE, D71_DEFAULT_PREPRETCH_LINE); 539 + 540 + /* configure bs control register */ 541 + value = BS_CTRL_EN | BS_CTRL_VM; 542 + 543 + malidp_write32(reg, BLK_CONTROL, value); 544 + } 545 + 546 + void d71_timing_ctrlr_dump(struct komeda_component *c, struct seq_file *sf) 547 + { 548 + u32 v[8], i; 549 + 550 + dump_block_header(sf, c->reg); 551 + 552 + get_values_from_reg(c->reg, 0xC0, 1, v); 553 + seq_printf(sf, "BS_INFO:\t\t0x%X\n", v[0]); 554 + 555 + get_values_from_reg(c->reg, 0xD0, 8, v); 556 + seq_printf(sf, "BS_CONTROL:\t\t0x%X\n", v[0]); 557 + seq_printf(sf, "BS_PROG_LINE:\t\t0x%X\n", v[1]); 558 + seq_printf(sf, "BS_PREFETCH_LINE:\t0x%X\n", v[2]); 559 + seq_printf(sf, "BS_BG_COLOR:\t\t0x%X\n", v[3]); 560 + seq_printf(sf, "BS_ACTIVESIZE:\t\t0x%X\n", v[4]); 561 + seq_printf(sf, "BS_HINTERVALS:\t\t0x%X\n", v[5]); 562 + seq_printf(sf, "BS_VINTERVALS:\t\t0x%X\n", v[6]); 563 + seq_printf(sf, "BS_SYNC:\t\t0x%X\n", v[7]); 564 + 565 + get_values_from_reg(c->reg, 0x100, 3, v); 566 + seq_printf(sf, "BS_DRIFT_TO:\t\t0x%X\n", v[0]); 567 + seq_printf(sf, "BS_FRAME_TO:\t\t0x%X\n", v[1]); 568 + seq_printf(sf, "BS_TE_TO:\t\t0x%X\n", v[2]); 569 + 570 + get_values_from_reg(c->reg, 0x110, 3, v); 571 + for (i = 0; i < 3; i++) 572 + seq_printf(sf, "BS_T%u_INTERVAL:\t\t0x%X\n", i, v[i]); 573 + 574 + get_values_from_reg(c->reg, 0x120, 5, v); 575 + for (i = 0; i < 2; i++) { 576 + seq_printf(sf, "BS_CRC%u_LOW:\t\t0x%X\n", i, v[i << 1]); 577 + seq_printf(sf, "BS_CRC%u_HIGH:\t\t0x%X\n", i, v[(i << 1) + 1]); 578 + } 579 + seq_printf(sf, "BS_USER:\t\t0x%X\n", v[4]); 580 + } 581 + 582 + struct komeda_component_funcs d71_timing_ctrlr_funcs = { 583 + .update = d71_timing_ctrlr_update, 584 + .disable = d71_timing_ctrlr_disable, 585 + .dump_register = d71_timing_ctrlr_dump, 586 + }; 587 + 588 + static int 
d71_timing_ctrlr_init(struct d71_dev *d71, 589 + struct block_header *blk, u32 __iomem *reg) 590 + { 591 + struct komeda_component *c; 592 + struct komeda_timing_ctrlr *ctrlr; 593 + u32 pipe_id, comp_id; 594 + 595 + get_resources_id(blk->block_info, &pipe_id, &comp_id); 596 + 597 + c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*ctrlr), 598 + KOMEDA_COMPONENT_TIMING_CTRLR, 599 + BLOCK_INFO_INPUT_ID(blk->block_info), 600 + &d71_timing_ctrlr_funcs, 601 + 1, BIT(KOMEDA_COMPONENT_IPS0 + pipe_id), 602 + BS_NUM_OUTPUT_IDS, reg, "DOU%d_BS", pipe_id); 603 + if (IS_ERR(c)) { 604 + DRM_ERROR("Failed to add display_ctrl component\n"); 605 + return PTR_ERR(c); 606 + } 607 + 608 + ctrlr = to_ctrlr(c); 609 + 610 + ctrlr->supports_dual_link = true; 611 + 612 + return 0; 613 + } 614 + 615 + int d71_probe_block(struct d71_dev *d71, 616 + struct block_header *blk, u32 __iomem *reg) 617 + { 618 + struct d71_pipeline *pipe; 619 + int blk_id = BLOCK_INFO_BLK_ID(blk->block_info); 620 + 621 + int err = 0; 622 + 623 + switch (BLOCK_INFO_BLK_TYPE(blk->block_info)) { 624 + case D71_BLK_TYPE_GCU: 625 + break; 626 + 627 + case D71_BLK_TYPE_LPU: 628 + pipe = d71->pipes[blk_id]; 629 + pipe->lpu_addr = reg; 630 + break; 631 + 632 + case D71_BLK_TYPE_LPU_LAYER: 633 + err = d71_layer_init(d71, blk, reg); 634 + break; 635 + 636 + case D71_BLK_TYPE_LPU_WB_LAYER: 637 + err = d71_wb_layer_init(d71, blk, reg); 638 + break; 639 + 640 + case D71_BLK_TYPE_CU: 641 + pipe = d71->pipes[blk_id]; 642 + pipe->cu_addr = reg; 643 + err = d71_compiz_init(d71, blk, reg); 644 + break; 645 + 646 + case D71_BLK_TYPE_CU_SPLITTER: 647 + case D71_BLK_TYPE_CU_SCALER: 648 + case D71_BLK_TYPE_CU_MERGER: 649 + break; 650 + 651 + case D71_BLK_TYPE_DOU: 652 + pipe = d71->pipes[blk_id]; 653 + pipe->dou_addr = reg; 654 + break; 655 + 656 + case D71_BLK_TYPE_DOU_IPS: 657 + err = d71_improc_init(d71, blk, reg); 658 + break; 659 + 660 + case D71_BLK_TYPE_DOU_FT_COEFF: 661 + pipe = d71->pipes[blk_id]; 662 + 
pipe->dou_ft_coeff_addr = reg; 663 + break; 664 + 665 + case D71_BLK_TYPE_DOU_BS: 666 + err = d71_timing_ctrlr_init(d71, blk, reg); 667 + break; 668 + 669 + case D71_BLK_TYPE_GLB_LT_COEFF: 670 + break; 671 + 672 + case D71_BLK_TYPE_GLB_SCL_COEFF: 673 + d71->glb_scl_coeff_addr[blk_id] = reg; 674 + break; 675 + 676 + default: 677 + DRM_ERROR("Unknown block (block_info: 0x%x) is found\n", 678 + blk->block_info); 679 + err = -EINVAL; 680 + break; 681 + } 682 + 683 + return err; 684 + }
+369 -8
drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
··· 4 4 * Author: James.Qian.Wang <james.qian.wang@arm.com> 5 5 * 6 6 */ 7 + 8 + #include <drm/drm_print.h> 9 + #include "d71_dev.h" 7 10 #include "malidp_io.h" 8 - #include "komeda_dev.h" 11 + 12 + static u64 get_lpu_event(struct d71_pipeline *d71_pipeline) 13 + { 14 + u32 __iomem *reg = d71_pipeline->lpu_addr; 15 + u32 status, raw_status; 16 + u64 evts = 0ULL; 17 + 18 + raw_status = malidp_read32(reg, BLK_IRQ_RAW_STATUS); 19 + if (raw_status & LPU_IRQ_IBSY) 20 + evts |= KOMEDA_EVENT_IBSY; 21 + if (raw_status & LPU_IRQ_EOW) 22 + evts |= KOMEDA_EVENT_EOW; 23 + 24 + if (raw_status & (LPU_IRQ_ERR | LPU_IRQ_IBSY)) { 25 + u32 restore = 0, tbu_status; 26 + /* Check error of LPU status */ 27 + status = malidp_read32(reg, BLK_STATUS); 28 + if (status & LPU_STATUS_AXIE) { 29 + restore |= LPU_STATUS_AXIE; 30 + evts |= KOMEDA_ERR_AXIE; 31 + } 32 + if (status & LPU_STATUS_ACE0) { 33 + restore |= LPU_STATUS_ACE0; 34 + evts |= KOMEDA_ERR_ACE0; 35 + } 36 + if (status & LPU_STATUS_ACE1) { 37 + restore |= LPU_STATUS_ACE1; 38 + evts |= KOMEDA_ERR_ACE1; 39 + } 40 + if (status & LPU_STATUS_ACE2) { 41 + restore |= LPU_STATUS_ACE2; 42 + evts |= KOMEDA_ERR_ACE2; 43 + } 44 + if (status & LPU_STATUS_ACE3) { 45 + restore |= LPU_STATUS_ACE3; 46 + evts |= KOMEDA_ERR_ACE3; 47 + } 48 + if (restore != 0) 49 + malidp_write32_mask(reg, BLK_STATUS, restore, 0); 50 + 51 + restore = 0; 52 + /* Check errors of TBU status */ 53 + tbu_status = malidp_read32(reg, LPU_TBU_STATUS); 54 + if (tbu_status & LPU_TBU_STATUS_TCF) { 55 + restore |= LPU_TBU_STATUS_TCF; 56 + evts |= KOMEDA_ERR_TCF; 57 + } 58 + if (tbu_status & LPU_TBU_STATUS_TTNG) { 59 + restore |= LPU_TBU_STATUS_TTNG; 60 + evts |= KOMEDA_ERR_TTNG; 61 + } 62 + if (tbu_status & LPU_TBU_STATUS_TITR) { 63 + restore |= LPU_TBU_STATUS_TITR; 64 + evts |= KOMEDA_ERR_TITR; 65 + } 66 + if (tbu_status & LPU_TBU_STATUS_TEMR) { 67 + restore |= LPU_TBU_STATUS_TEMR; 68 + evts |= KOMEDA_ERR_TEMR; 69 + } 70 + if (tbu_status & LPU_TBU_STATUS_TTF) { 71 + restore |= 
LPU_TBU_STATUS_TTF; 72 + evts |= KOMEDA_ERR_TTF; 73 + } 74 + if (restore != 0) 75 + malidp_write32_mask(reg, LPU_TBU_STATUS, restore, 0); 76 + } 77 + 78 + malidp_write32(reg, BLK_IRQ_CLEAR, raw_status); 79 + return evts; 80 + } 81 + 82 + static u64 get_cu_event(struct d71_pipeline *d71_pipeline) 83 + { 84 + u32 __iomem *reg = d71_pipeline->cu_addr; 85 + u32 status, raw_status; 86 + u64 evts = 0ULL; 87 + 88 + raw_status = malidp_read32(reg, BLK_IRQ_RAW_STATUS); 89 + if (raw_status & CU_IRQ_OVR) 90 + evts |= KOMEDA_EVENT_OVR; 91 + 92 + if (raw_status & (CU_IRQ_ERR | CU_IRQ_OVR)) { 93 + status = malidp_read32(reg, BLK_STATUS) & 0x7FFFFFFF; 94 + if (status & CU_STATUS_CPE) 95 + evts |= KOMEDA_ERR_CPE; 96 + if (status & CU_STATUS_ZME) 97 + evts |= KOMEDA_ERR_ZME; 98 + if (status & CU_STATUS_CFGE) 99 + evts |= KOMEDA_ERR_CFGE; 100 + if (status) 101 + malidp_write32_mask(reg, BLK_STATUS, status, 0); 102 + } 103 + 104 + malidp_write32(reg, BLK_IRQ_CLEAR, raw_status); 105 + 106 + return evts; 107 + } 108 + 109 + static u64 get_dou_event(struct d71_pipeline *d71_pipeline) 110 + { 111 + u32 __iomem *reg = d71_pipeline->dou_addr; 112 + u32 status, raw_status; 113 + u64 evts = 0ULL; 114 + 115 + raw_status = malidp_read32(reg, BLK_IRQ_RAW_STATUS); 116 + if (raw_status & DOU_IRQ_PL0) 117 + evts |= KOMEDA_EVENT_VSYNC; 118 + if (raw_status & DOU_IRQ_UND) 119 + evts |= KOMEDA_EVENT_URUN; 120 + 121 + if (raw_status & (DOU_IRQ_ERR | DOU_IRQ_UND)) { 122 + u32 restore = 0; 123 + 124 + status = malidp_read32(reg, BLK_STATUS); 125 + if (status & DOU_STATUS_DRIFTTO) { 126 + restore |= DOU_STATUS_DRIFTTO; 127 + evts |= KOMEDA_ERR_DRIFTTO; 128 + } 129 + if (status & DOU_STATUS_FRAMETO) { 130 + restore |= DOU_STATUS_FRAMETO; 131 + evts |= KOMEDA_ERR_FRAMETO; 132 + } 133 + if (status & DOU_STATUS_TETO) { 134 + restore |= DOU_STATUS_TETO; 135 + evts |= KOMEDA_ERR_TETO; 136 + } 137 + if (status & DOU_STATUS_CSCE) { 138 + restore |= DOU_STATUS_CSCE; 139 + evts |= KOMEDA_ERR_CSCE; 140 + } 141 + 
142 + if (restore != 0) 143 + malidp_write32_mask(reg, BLK_STATUS, restore, 0); 144 + } 145 + 146 + malidp_write32(reg, BLK_IRQ_CLEAR, raw_status); 147 + return evts; 148 + } 149 + 150 + static u64 get_pipeline_event(struct d71_pipeline *d71_pipeline, u32 gcu_status) 151 + { 152 + u32 evts = 0ULL; 153 + 154 + if (gcu_status & (GLB_IRQ_STATUS_LPU0 | GLB_IRQ_STATUS_LPU1)) 155 + evts |= get_lpu_event(d71_pipeline); 156 + 157 + if (gcu_status & (GLB_IRQ_STATUS_CU0 | GLB_IRQ_STATUS_CU1)) 158 + evts |= get_cu_event(d71_pipeline); 159 + 160 + if (gcu_status & (GLB_IRQ_STATUS_DOU0 | GLB_IRQ_STATUS_DOU1)) 161 + evts |= get_dou_event(d71_pipeline); 162 + 163 + return evts; 164 + } 165 + 166 + static irqreturn_t 167 + d71_irq_handler(struct komeda_dev *mdev, struct komeda_events *evts) 168 + { 169 + struct d71_dev *d71 = mdev->chip_data; 170 + u32 status, gcu_status, raw_status; 171 + 172 + gcu_status = malidp_read32(d71->gcu_addr, GLB_IRQ_STATUS); 173 + 174 + if (gcu_status & GLB_IRQ_STATUS_GCU) { 175 + raw_status = malidp_read32(d71->gcu_addr, BLK_IRQ_RAW_STATUS); 176 + if (raw_status & GCU_IRQ_CVAL0) 177 + evts->pipes[0] |= KOMEDA_EVENT_FLIP; 178 + if (raw_status & GCU_IRQ_CVAL1) 179 + evts->pipes[1] |= KOMEDA_EVENT_FLIP; 180 + if (raw_status & GCU_IRQ_ERR) { 181 + status = malidp_read32(d71->gcu_addr, BLK_STATUS); 182 + if (status & GCU_STATUS_MERR) { 183 + evts->global |= KOMEDA_ERR_MERR; 184 + malidp_write32_mask(d71->gcu_addr, BLK_STATUS, 185 + GCU_STATUS_MERR, 0); 186 + } 187 + } 188 + 189 + malidp_write32(d71->gcu_addr, BLK_IRQ_CLEAR, raw_status); 190 + } 191 + 192 + if (gcu_status & GLB_IRQ_STATUS_PIPE0) 193 + evts->pipes[0] |= get_pipeline_event(d71->pipes[0], gcu_status); 194 + 195 + if (gcu_status & GLB_IRQ_STATUS_PIPE1) 196 + evts->pipes[1] |= get_pipeline_event(d71->pipes[1], gcu_status); 197 + 198 + return gcu_status ? 
IRQ_HANDLED : IRQ_NONE; 199 + } 200 + 201 + #define ENABLED_GCU_IRQS (GCU_IRQ_CVAL0 | GCU_IRQ_CVAL1 | \ 202 + GCU_IRQ_MODE | GCU_IRQ_ERR) 203 + #define ENABLED_LPU_IRQS (LPU_IRQ_IBSY | LPU_IRQ_ERR | LPU_IRQ_EOW) 204 + #define ENABLED_CU_IRQS (CU_IRQ_OVR | CU_IRQ_ERR) 205 + #define ENABLED_DOU_IRQS (DOU_IRQ_UND | DOU_IRQ_ERR) 206 + 207 + static int d71_enable_irq(struct komeda_dev *mdev) 208 + { 209 + struct d71_dev *d71 = mdev->chip_data; 210 + struct d71_pipeline *pipe; 211 + u32 i; 212 + 213 + malidp_write32_mask(d71->gcu_addr, BLK_IRQ_MASK, 214 + ENABLED_GCU_IRQS, ENABLED_GCU_IRQS); 215 + for (i = 0; i < d71->num_pipelines; i++) { 216 + pipe = d71->pipes[i]; 217 + malidp_write32_mask(pipe->cu_addr, BLK_IRQ_MASK, 218 + ENABLED_CU_IRQS, ENABLED_CU_IRQS); 219 + malidp_write32_mask(pipe->lpu_addr, BLK_IRQ_MASK, 220 + ENABLED_LPU_IRQS, ENABLED_LPU_IRQS); 221 + malidp_write32_mask(pipe->dou_addr, BLK_IRQ_MASK, 222 + ENABLED_DOU_IRQS, ENABLED_DOU_IRQS); 223 + } 224 + return 0; 225 + } 226 + 227 + static int d71_disable_irq(struct komeda_dev *mdev) 228 + { 229 + struct d71_dev *d71 = mdev->chip_data; 230 + struct d71_pipeline *pipe; 231 + u32 i; 232 + 233 + malidp_write32_mask(d71->gcu_addr, BLK_IRQ_MASK, ENABLED_GCU_IRQS, 0); 234 + for (i = 0; i < d71->num_pipelines; i++) { 235 + pipe = d71->pipes[i]; 236 + malidp_write32_mask(pipe->cu_addr, BLK_IRQ_MASK, 237 + ENABLED_CU_IRQS, 0); 238 + malidp_write32_mask(pipe->lpu_addr, BLK_IRQ_MASK, 239 + ENABLED_LPU_IRQS, 0); 240 + malidp_write32_mask(pipe->dou_addr, BLK_IRQ_MASK, 241 + ENABLED_DOU_IRQS, 0); 242 + } 243 + return 0; 244 + } 245 + 246 + static int d71_reset(struct d71_dev *d71) 247 + { 248 + u32 __iomem *gcu = d71->gcu_addr; 249 + int ret; 250 + 251 + malidp_write32_mask(gcu, BLK_CONTROL, 252 + GCU_CONTROL_SRST, GCU_CONTROL_SRST); 253 + 254 + ret = dp_wait_cond(!(malidp_read32(gcu, BLK_CONTROL) & GCU_CONTROL_SRST), 255 + 100, 1000, 10000); 256 + 257 + return ret > 0 ? 
0 : -ETIMEDOUT; 258 + } 259 + 260 + void d71_read_block_header(u32 __iomem *reg, struct block_header *blk) 261 + { 262 + int i; 263 + 264 + blk->block_info = malidp_read32(reg, BLK_BLOCK_INFO); 265 + if (BLOCK_INFO_BLK_TYPE(blk->block_info) == D71_BLK_TYPE_RESERVED) 266 + return; 267 + 268 + blk->pipeline_info = malidp_read32(reg, BLK_PIPELINE_INFO); 269 + 270 + /* get valid input and output ids */ 271 + for (i = 0; i < PIPELINE_INFO_N_VALID_INPUTS(blk->pipeline_info); i++) 272 + blk->input_ids[i] = malidp_read32(reg + i, BLK_VALID_INPUT_ID0); 273 + for (i = 0; i < PIPELINE_INFO_N_OUTPUTS(blk->pipeline_info); i++) 274 + blk->output_ids[i] = malidp_read32(reg + i, BLK_OUTPUT_ID0); 275 + } 276 + 277 + static void d71_cleanup(struct komeda_dev *mdev) 278 + { 279 + struct d71_dev *d71 = mdev->chip_data; 280 + 281 + if (!d71) 282 + return; 283 + 284 + devm_kfree(mdev->dev, d71); 285 + mdev->chip_data = NULL; 286 + } 9 287 10 288 static int d71_enum_resources(struct komeda_dev *mdev) 11 289 { 12 - /* TODO add enum resources */ 13 - return -1; 290 + struct d71_dev *d71; 291 + struct komeda_pipeline *pipe; 292 + struct block_header blk; 293 + u32 __iomem *blk_base; 294 + u32 i, value, offset; 295 + int err; 296 + 297 + d71 = devm_kzalloc(mdev->dev, sizeof(*d71), GFP_KERNEL); 298 + if (!d71) 299 + return -ENOMEM; 300 + 301 + mdev->chip_data = d71; 302 + d71->mdev = mdev; 303 + d71->gcu_addr = mdev->reg_base; 304 + d71->periph_addr = mdev->reg_base + (D71_BLOCK_OFFSET_PERIPH >> 2); 305 + 306 + err = d71_reset(d71); 307 + if (err) { 308 + DRM_ERROR("Fail to reset d71 device.\n"); 309 + goto err_cleanup; 310 + } 311 + 312 + /* probe GCU */ 313 + value = malidp_read32(d71->gcu_addr, GLB_CORE_INFO); 314 + d71->num_blocks = value & 0xFF; 315 + d71->num_pipelines = (value >> 8) & 0x7; 316 + 317 + if (d71->num_pipelines > D71_MAX_PIPELINE) { 318 + DRM_ERROR("d71 supports %d pipelines, but got: %d.\n", 319 + D71_MAX_PIPELINE, d71->num_pipelines); 320 + err = -EINVAL; 321 + goto 
err_cleanup; 322 + } 323 + 324 + /* probe PERIPH */ 325 + value = malidp_read32(d71->periph_addr, BLK_BLOCK_INFO); 326 + if (BLOCK_INFO_BLK_TYPE(value) != D71_BLK_TYPE_PERIPH) { 327 + DRM_ERROR("access blk periph but got blk: %d.\n", 328 + BLOCK_INFO_BLK_TYPE(value)); 329 + err = -EINVAL; 330 + goto err_cleanup; 331 + } 332 + 333 + value = malidp_read32(d71->periph_addr, PERIPH_CONFIGURATION_ID); 334 + 335 + d71->max_line_size = value & PERIPH_MAX_LINE_SIZE ? 4096 : 2048; 336 + d71->max_vsize = 4096; 337 + d71->num_rich_layers = value & PERIPH_NUM_RICH_LAYERS ? 2 : 1; 338 + d71->supports_dual_link = value & PERIPH_SPLIT_EN ? true : false; 339 + d71->integrates_tbu = value & PERIPH_TBU_EN ? true : false; 340 + 341 + for (i = 0; i < d71->num_pipelines; i++) { 342 + pipe = komeda_pipeline_add(mdev, sizeof(struct d71_pipeline), 343 + NULL); 344 + if (IS_ERR(pipe)) { 345 + err = PTR_ERR(pipe); 346 + goto err_cleanup; 347 + } 348 + d71->pipes[i] = to_d71_pipeline(pipe); 349 + } 350 + 351 + /* loop the register blks and probe */ 352 + i = 2; /* exclude GCU and PERIPH */ 353 + offset = D71_BLOCK_SIZE; /* skip GCU */ 354 + while (i < d71->num_blocks) { 355 + blk_base = mdev->reg_base + (offset >> 2); 356 + 357 + d71_read_block_header(blk_base, &blk); 358 + if (BLOCK_INFO_BLK_TYPE(blk.block_info) != D71_BLK_TYPE_RESERVED) { 359 + err = d71_probe_block(d71, &blk, blk_base); 360 + if (err) 361 + goto err_cleanup; 362 + i++; 363 + } 364 + 365 + offset += D71_BLOCK_SIZE; 366 + } 367 + 368 + DRM_DEBUG("total %d (out of %d) blocks are found.\n", 369 + i, d71->num_blocks); 370 + 371 + return 0; 372 + 373 + err_cleanup: 374 + d71_cleanup(mdev); 375 + return err; 14 376 } 15 377 16 378 #define __HW_ID(__group, __format) \ ··· 455 93 static struct komeda_dev_funcs d71_chip_funcs = { 456 94 .init_format_table = d71_init_fmt_tbl, 457 95 .enum_resources = d71_enum_resources, 458 - .cleanup = NULL, 96 + .cleanup = d71_cleanup, 97 + .irq_handler = d71_irq_handler, 98 + .enable_irq = 
d71_enable_irq, 99 + .disable_irq = d71_disable_irq, 459 100 }; 460 - 461 - #define GLB_ARCH_ID 0x000 462 - #define GLB_CORE_ID 0x004 463 - #define GLB_CORE_INFO 0x008 464 101 465 102 struct komeda_dev_funcs * 466 103 d71_identify(u32 __iomem *reg_base, struct komeda_chip_info *chip)
+50
drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. 4 + * Author: James.Qian.Wang <james.qian.wang@arm.com> 5 + * 6 + */ 7 + #ifndef _D71_DEV_H_ 8 + #define _D71_DEV_H_ 9 + 10 + #include "komeda_dev.h" 11 + #include "komeda_pipeline.h" 12 + #include "d71_regs.h" 13 + 14 + struct d71_pipeline { 15 + struct komeda_pipeline base; 16 + 17 + /* d71 private pipeline blocks */ 18 + u32 __iomem *lpu_addr; 19 + u32 __iomem *cu_addr; 20 + u32 __iomem *dou_addr; 21 + u32 __iomem *dou_ft_coeff_addr; /* forward transform coeffs table */ 22 + }; 23 + 24 + struct d71_dev { 25 + struct komeda_dev *mdev; 26 + 27 + int num_blocks; 28 + int num_pipelines; 29 + int num_rich_layers; 30 + u32 max_line_size; 31 + u32 max_vsize; 32 + u32 supports_dual_link : 1; 33 + u32 integrates_tbu : 1; 34 + 35 + /* global register blocks */ 36 + u32 __iomem *gcu_addr; 37 + /* scaling coeffs table */ 38 + u32 __iomem *glb_scl_coeff_addr[D71_MAX_GLB_SCL_COEFF]; 39 + u32 __iomem *periph_addr; 40 + 41 + struct d71_pipeline *pipes[D71_MAX_PIPELINE]; 42 + }; 43 + 44 + #define to_d71_pipeline(x) container_of(x, struct d71_pipeline, base) 45 + 46 + int d71_probe_block(struct d71_dev *d71, 47 + struct block_header *blk, u32 __iomem *reg); 48 + void d71_read_block_header(u32 __iomem *reg, struct block_header *blk); 49 + 50 + #endif /* !_D71_DEV_H_ */
+530
drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. 4 + * Author: James.Qian.Wang <james.qian.wang@arm.com> 5 + * 6 + */ 7 + #ifndef _D71_REG_H_ 8 + #define _D71_REG_H_ 9 + 10 + /* Common block registers offset */ 11 + #define BLK_BLOCK_INFO 0x000 12 + #define BLK_PIPELINE_INFO 0x004 13 + #define BLK_VALID_INPUT_ID0 0x020 14 + #define BLK_OUTPUT_ID0 0x060 15 + #define BLK_INPUT_ID0 0x080 16 + #define BLK_IRQ_RAW_STATUS 0x0A0 17 + #define BLK_IRQ_CLEAR 0x0A4 18 + #define BLK_IRQ_MASK 0x0A8 19 + #define BLK_IRQ_STATUS 0x0AC 20 + #define BLK_STATUS 0x0B0 21 + #define BLK_INFO 0x0C0 22 + #define BLK_CONTROL 0x0D0 23 + #define BLK_SIZE 0x0D4 24 + #define BLK_IN_SIZE 0x0E0 25 + 26 + #define BLK_P0_PTR_LOW 0x100 27 + #define BLK_P0_PTR_HIGH 0x104 28 + #define BLK_P0_STRIDE 0x108 29 + #define BLK_P1_PTR_LOW 0x110 30 + #define BLK_P1_PTR_HIGH 0x114 31 + #define BLK_P1_STRIDE 0x118 32 + #define BLK_P2_PTR_LOW 0x120 33 + #define BLK_P2_PTR_HIGH 0x124 34 + 35 + #define BLOCK_INFO_N_SUBBLKS(x) ((x) & 0x000F) 36 + #define BLOCK_INFO_BLK_ID(x) (((x) & 0x00F0) >> 4) 37 + #define BLOCK_INFO_BLK_TYPE(x) (((x) & 0xFF00) >> 8) 38 + #define BLOCK_INFO_INPUT_ID(x) ((x) & 0xFFF0) 39 + #define BLOCK_INFO_TYPE_ID(x) (((x) & 0x0FF0) >> 4) 40 + 41 + #define PIPELINE_INFO_N_OUTPUTS(x) ((x) & 0x000F) 42 + #define PIPELINE_INFO_N_VALID_INPUTS(x) (((x) & 0x0F00) >> 8) 43 + 44 + /* Common block control register bits */ 45 + #define BLK_CTRL_EN BIT(0) 46 + /* Common size macro */ 47 + #define HV_SIZE(h, v) (((h) & 0x1FFF) + (((v) & 0x1FFF) << 16)) 48 + #define HV_OFFSET(h, v) (((h) & 0xFFF) + (((v) & 0xFFF) << 16)) 49 + #define HV_CROP(h, v) (((h) & 0xFFF) + (((v) & 0xFFF) << 16)) 50 + 51 + /* AD_CONTROL register */ 52 + #define AD_CONTROL 0x160 53 + 54 + /* AD_CONTROL register bits */ 55 + #define AD_AEN BIT(0) 56 + #define AD_YT BIT(1) 57 + #define AD_BS BIT(2) 58 + #define AD_WB BIT(3) 59 + #define AD_TH BIT(4) 60 + 61 + /* Global 
Control Unit */ 62 + #define GLB_ARCH_ID 0x000 63 + #define GLB_CORE_ID 0x004 64 + #define GLB_CORE_INFO 0x008 65 + #define GLB_IRQ_STATUS 0x010 66 + 67 + #define GCU_CONFIG_VALID0 0x0D4 68 + #define GCU_CONFIG_VALID1 0x0D8 69 + 70 + /* GCU_CONTROL_BITS */ 71 + #define GCU_CONTROL_MODE(x) ((x) & 0x7) 72 + #define GCU_CONTROL_SRST BIT(16) 73 + 74 + /* GCU opmode */ 75 + #define INACTIVE_MODE 0 76 + #define TBU_CONNECT_MODE 1 77 + #define TBU_DISCONNECT_MODE 2 78 + #define DO0_ACTIVE_MODE 3 79 + #define DO1_ACTIVE_MODE 4 80 + #define DO01_ACTIVE_MODE 5 81 + 82 + /* GLB_IRQ_STATUS bits */ 83 + #define GLB_IRQ_STATUS_GCU BIT(0) 84 + #define GLB_IRQ_STATUS_LPU0 BIT(8) 85 + #define GLB_IRQ_STATUS_LPU1 BIT(9) 86 + #define GLB_IRQ_STATUS_ATU0 BIT(10) 87 + #define GLB_IRQ_STATUS_ATU1 BIT(11) 88 + #define GLB_IRQ_STATUS_ATU2 BIT(12) 89 + #define GLB_IRQ_STATUS_ATU3 BIT(13) 90 + #define GLB_IRQ_STATUS_CU0 BIT(16) 91 + #define GLB_IRQ_STATUS_CU1 BIT(17) 92 + #define GLB_IRQ_STATUS_DOU0 BIT(24) 93 + #define GLB_IRQ_STATUS_DOU1 BIT(25) 94 + 95 + #define GLB_IRQ_STATUS_PIPE0 (GLB_IRQ_STATUS_LPU0 |\ 96 + GLB_IRQ_STATUS_ATU0 |\ 97 + GLB_IRQ_STATUS_ATU1 |\ 98 + GLB_IRQ_STATUS_CU0 |\ 99 + GLB_IRQ_STATUS_DOU0) 100 + 101 + #define GLB_IRQ_STATUS_PIPE1 (GLB_IRQ_STATUS_LPU1 |\ 102 + GLB_IRQ_STATUS_ATU2 |\ 103 + GLB_IRQ_STATUS_ATU3 |\ 104 + GLB_IRQ_STATUS_CU1 |\ 105 + GLB_IRQ_STATUS_DOU1) 106 + 107 + #define GLB_IRQ_STATUS_ATU (GLB_IRQ_STATUS_ATU0 |\ 108 + GLB_IRQ_STATUS_ATU1 |\ 109 + GLB_IRQ_STATUS_ATU2 |\ 110 + GLB_IRQ_STATUS_ATU3) 111 + 112 + /* GCU_IRQ_BITS */ 113 + #define GCU_IRQ_CVAL0 BIT(0) 114 + #define GCU_IRQ_CVAL1 BIT(1) 115 + #define GCU_IRQ_MODE BIT(4) 116 + #define GCU_IRQ_ERR BIT(11) 117 + 118 + /* GCU_STATUS_BITS */ 119 + #define GCU_STATUS_MODE(x) ((x) & 0x7) 120 + #define GCU_STATUS_MERR BIT(4) 121 + #define GCU_STATUS_TCS0 BIT(8) 122 + #define GCU_STATUS_TCS1 BIT(9) 123 + #define GCU_STATUS_ACTIVE BIT(31) 124 + 125 + /* GCU_CONFIG_VALIDx BITS */ 126 + #define 
GCU_CONFIG_CVAL BIT(0) 127 + 128 + /* PERIPHERAL registers */ 129 + #define PERIPH_MAX_LINE_SIZE BIT(0) 130 + #define PERIPH_NUM_RICH_LAYERS BIT(4) 131 + #define PERIPH_SPLIT_EN BIT(8) 132 + #define PERIPH_TBU_EN BIT(12) 133 + #define PERIPH_AFBC_DMA_EN BIT(16) 134 + #define PERIPH_CONFIGURATION_ID 0x1D4 135 + 136 + /* LPU register */ 137 + #define LPU_TBU_STATUS 0x0B4 138 + #define LPU_RAXI_CONTROL 0x0D0 139 + #define LPU_WAXI_CONTROL 0x0D4 140 + #define LPU_TBU_CONTROL 0x0D8 141 + 142 + /* LPU_xAXI_CONTROL_BITS */ 143 + #define TO_RAXI_AOUTSTDCAPB(x) (x) 144 + #define TO_RAXI_BOUTSTDCAPB(x) ((x) << 8) 145 + #define TO_RAXI_BEN(x) ((x) << 15) 146 + #define TO_xAXI_BURSTLEN(x) ((x) << 16) 147 + #define TO_xAXI_AxQOS(x) ((x) << 24) 148 + #define TO_xAXI_ORD(x) ((x) << 31) 149 + #define TO_WAXI_OUTSTDCAPB(x) (x) 150 + 151 + #define RAXI_AOUTSTDCAPB_MASK 0x7F 152 + #define RAXI_BOUTSTDCAPB_MASK 0x7F00 153 + #define RAXI_BEN_MASK BIT(15) 154 + #define xAXI_BURSTLEN_MASK 0x3F0000 155 + #define xAXI_AxQOS_MASK 0xF000000 156 + #define xAXI_ORD_MASK BIT(31) 157 + #define WAXI_OUTSTDCAPB_MASK 0x3F 158 + 159 + /* LPU_TBU_CONTROL BITS */ 160 + #define TO_TBU_DOUTSTDCAPB(x) (x) 161 + #define TBU_DOUTSTDCAPB_MASK 0x3F 162 + 163 + /* LPU_IRQ_BITS */ 164 + #define LPU_IRQ_IBSY BIT(10) 165 + #define LPU_IRQ_ERR BIT(11) 166 + #define LPU_IRQ_EOW BIT(12) 167 + #define LPU_IRQ_PL0 BIT(13) 168 + 169 + /* LPU_STATUS_BITS */ 170 + #define LPU_STATUS_AXIED(x) ((x) & 0xF) 171 + #define LPU_STATUS_AXIE BIT(4) 172 + #define LPU_STATUS_AXIRP BIT(5) 173 + #define LPU_STATUS_AXIWP BIT(6) 174 + #define LPU_STATUS_ACE0 BIT(16) 175 + #define LPU_STATUS_ACE1 BIT(17) 176 + #define LPU_STATUS_ACE2 BIT(18) 177 + #define LPU_STATUS_ACE3 BIT(19) 178 + #define LPU_STATUS_ACTIVE BIT(31) 179 + 180 + #define AXIEID_MASK 0xF 181 + #define AXIE_MASK LPU_STATUS_AXIE 182 + #define AXIRP_MASK LPU_STATUS_AXIRP 183 + #define AXIWP_MASK LPU_STATUS_AXIWP 184 + 185 + #define FROM_AXIEID(reg) ((reg) & AXIEID_MASK) 
186 + #define TO_AXIE(x) ((x) << 4) 187 + #define FROM_AXIRP(reg) (((reg) & AXIRP_MASK) >> 5) 188 + #define FROM_AXIWP(reg) (((reg) & AXIWP_MASK) >> 6) 189 + 190 + /* LPU_TBU_STATUS_BITS */ 191 + #define LPU_TBU_STATUS_TCF BIT(1) 192 + #define LPU_TBU_STATUS_TTNG BIT(2) 193 + #define LPU_TBU_STATUS_TITR BIT(8) 194 + #define LPU_TBU_STATUS_TEMR BIT(16) 195 + #define LPU_TBU_STATUS_TTF BIT(31) 196 + 197 + /* LPU_TBU_CONTROL BITS */ 198 + #define LPU_TBU_CTRL_TLBPEN BIT(16) 199 + 200 + /* CROSSBAR CONTROL BITS */ 201 + #define CBU_INPUT_CTRL_EN BIT(0) 202 + #define CBU_NUM_INPUT_IDS 5 203 + #define CBU_NUM_OUTPUT_IDS 5 204 + 205 + /* CU register */ 206 + #define CU_BG_COLOR 0x0DC 207 + #define CU_INPUT0_SIZE 0x0E0 208 + #define CU_INPUT0_OFFSET 0x0E4 209 + #define CU_INPUT0_CONTROL 0x0E8 210 + #define CU_INPUT1_SIZE 0x0F0 211 + #define CU_INPUT1_OFFSET 0x0F4 212 + #define CU_INPUT1_CONTROL 0x0F8 213 + #define CU_INPUT2_SIZE 0x100 214 + #define CU_INPUT2_OFFSET 0x104 215 + #define CU_INPUT2_CONTROL 0x108 216 + #define CU_INPUT3_SIZE 0x110 217 + #define CU_INPUT3_OFFSET 0x114 218 + #define CU_INPUT3_CONTROL 0x118 219 + #define CU_INPUT4_SIZE 0x120 220 + #define CU_INPUT4_OFFSET 0x124 221 + #define CU_INPUT4_CONTROL 0x128 222 + 223 + #define CU_PER_INPUT_REGS 4 224 + 225 + #define CU_NUM_INPUT_IDS 5 226 + #define CU_NUM_OUTPUT_IDS 1 227 + 228 + /* CU control register bits */ 229 + #define CU_CTRL_COPROC BIT(0) 230 + 231 + /* CU_IRQ_BITS */ 232 + #define CU_IRQ_OVR BIT(9) 233 + #define CU_IRQ_ERR BIT(11) 234 + 235 + /* CU_STATUS_BITS */ 236 + #define CU_STATUS_CPE BIT(0) 237 + #define CU_STATUS_ZME BIT(1) 238 + #define CU_STATUS_CFGE BIT(2) 239 + #define CU_STATUS_ACTIVE BIT(31) 240 + 241 + /* CU input control register bits */ 242 + #define CU_INPUT_CTRL_EN BIT(0) 243 + #define CU_INPUT_CTRL_PAD BIT(1) 244 + #define CU_INPUT_CTRL_PMUL BIT(2) 245 + #define CU_INPUT_CTRL_ALPHA(x) (((x) & 0xFF) << 8) 246 + 247 + /* DOU register */ 248 + 249 + /* DOU_IRQ_BITS */ 250 + #define 
DOU_IRQ_UND BIT(8) 251 + #define DOU_IRQ_ERR BIT(11) 252 + #define DOU_IRQ_PL0 BIT(13) 253 + #define DOU_IRQ_PL1 BIT(14) 254 + 255 + /* DOU_STATUS_BITS */ 256 + #define DOU_STATUS_DRIFTTO BIT(0) 257 + #define DOU_STATUS_FRAMETO BIT(1) 258 + #define DOU_STATUS_TETO BIT(2) 259 + #define DOU_STATUS_CSCE BIT(8) 260 + #define DOU_STATUS_ACTIVE BIT(31) 261 + 262 + /* Layer registers */ 263 + #define LAYER_INFO 0x0C0 264 + #define LAYER_R_CONTROL 0x0D4 265 + #define LAYER_FMT 0x0D8 266 + #define LAYER_LT_COEFFTAB 0x0DC 267 + #define LAYER_PALPHA 0x0E4 268 + 269 + #define LAYER_YUV_RGB_COEFF0 0x130 270 + 271 + #define LAYER_AD_H_CROP 0x164 272 + #define LAYER_AD_V_CROP 0x168 273 + 274 + #define LAYER_RGB_RGB_COEFF0 0x170 275 + 276 + /* L_CONTROL_BITS */ 277 + #define L_EN BIT(0) 278 + #define L_IT BIT(4) 279 + #define L_R2R BIT(5) 280 + #define L_FT BIT(6) 281 + #define L_ROT(x) (((x) & 3) << 8) 282 + #define L_HFLIP BIT(10) 283 + #define L_VFLIP BIT(11) 284 + #define L_TBU_EN BIT(16) 285 + #define L_A_RCACHE(x) (((x) & 0xF) << 28) 286 + #define L_ROT_R0 0 287 + #define L_ROT_R90 1 288 + #define L_ROT_R180 2 289 + #define L_ROT_R270 3 290 + 291 + /* LAYER_R_CONTROL BITS */ 292 + #define LR_CHI422_BILINEAR 0 293 + #define LR_CHI422_REPLICATION 1 294 + #define LR_CHI420_JPEG (0 << 2) 295 + #define LR_CHI420_MPEG (1 << 2) 296 + 297 + #define L_ITSEL(x) ((x) & 0xFFF) 298 + #define L_FTSEL(x) (((x) & 0xFFF) << 16) 299 + 300 + #define LAYER_PER_PLANE_REGS 4 301 + 302 + /* Layer_WR registers */ 303 + #define LAYER_WR_PROG_LINE 0x0D4 304 + #define LAYER_WR_FORMAT 0x0D8 305 + 306 + /* Layer_WR control bits */ 307 + #define LW_OFM BIT(4) 308 + #define LW_LALPHA(x) (((x) & 0xFF) << 8) 309 + #define LW_A_WCACHE(x) (((x) & 0xF) << 28) 310 + #define LW_TBU_EN BIT(16) 311 + 312 + #define AxCACHE_MASK 0xF0000000 313 + 314 + /* Layer AXI R/W cache setting */ 315 + #define AxCACHE_B BIT(0) /* Bufferable */ 316 + #define AxCACHE_M BIT(1) /* Modifiable */ 317 + #define AxCACHE_RA BIT(2) /* 
Read-Allocate */ 318 + #define AxCACHE_WA BIT(3) /* Write-Allocate */ 319 + 320 + /* Layer info bits */ 321 + #define L_INFO_RF BIT(0) 322 + #define L_INFO_CM BIT(1) 323 + #define L_INFO_ABUF_SIZE(x) (((x) >> 4) & 0x7) 324 + 325 + /* Scaler registers */ 326 + #define SC_COEFFTAB 0x0DC 327 + #define SC_OUT_SIZE 0x0E4 328 + #define SC_H_CROP 0x0E8 329 + #define SC_V_CROP 0x0EC 330 + #define SC_H_INIT_PH 0x0F0 331 + #define SC_H_DELTA_PH 0x0F4 332 + #define SC_V_INIT_PH 0x0F8 333 + #define SC_V_DELTA_PH 0x0FC 334 + #define SC_ENH_LIMITS 0x130 335 + #define SC_ENH_COEFF0 0x134 336 + 337 + #define SC_MAX_ENH_COEFF 9 338 + 339 + /* SC_CTRL_BITS */ 340 + #define SC_CTRL_SCL BIT(0) 341 + #define SC_CTRL_LS BIT(1) 342 + #define SC_CTRL_AP BIT(4) 343 + #define SC_CTRL_IENH BIT(8) 344 + #define SC_CTRL_RGBSM BIT(16) 345 + #define SC_CTRL_ASM BIT(17) 346 + 347 + #define SC_VTSEL(vtal) ((vtal) << 16) 348 + 349 + #define SC_NUM_INPUTS_IDS 1 350 + #define SC_NUM_OUTPUTS_IDS 1 351 + 352 + #define MG_NUM_INPUTS_IDS 2 353 + #define MG_NUM_OUTPUTS_IDS 1 354 + 355 + /* Merger registers */ 356 + #define MG_INPUT_ID0 BLK_INPUT_ID0 357 + #define MG_INPUT_ID1 (MG_INPUT_ID0 + 4) 358 + #define MG_SIZE BLK_SIZE 359 + 360 + /* Splitter registers */ 361 + #define SP_OVERLAP_SIZE 0xD8 362 + 363 + /* Backend registers */ 364 + #define BS_INFO 0x0C0 365 + #define BS_PROG_LINE 0x0D4 366 + #define BS_PREFETCH_LINE 0x0D8 367 + #define BS_BG_COLOR 0x0DC 368 + #define BS_ACTIVESIZE 0x0E0 369 + #define BS_HINTERVALS 0x0E4 370 + #define BS_VINTERVALS 0x0E8 371 + #define BS_SYNC 0x0EC 372 + #define BS_DRIFT_TO 0x100 373 + #define BS_FRAME_TO 0x104 374 + #define BS_TE_TO 0x108 375 + #define BS_T0_INTERVAL 0x110 376 + #define BS_T1_INTERVAL 0x114 377 + #define BS_T2_INTERVAL 0x118 378 + #define BS_CRC0_LOW 0x120 379 + #define BS_CRC0_HIGH 0x124 380 + #define BS_CRC1_LOW 0x128 381 + #define BS_CRC1_HIGH 0x12C 382 + #define BS_USER 0x130 383 + 384 + /* BS control register bits */ 385 + #define BS_CTRL_EN 
BIT(0) 386 + #define BS_CTRL_VM BIT(1) 387 + #define BS_CTRL_BM BIT(2) 388 + #define BS_CTRL_HMASK BIT(4) 389 + #define BS_CTRL_VD BIT(5) 390 + #define BS_CTRL_TE BIT(8) 391 + #define BS_CTRL_TS BIT(9) 392 + #define BS_CTRL_TM BIT(12) 393 + #define BS_CTRL_DL BIT(16) 394 + #define BS_CTRL_SBS BIT(17) 395 + #define BS_CTRL_CRC BIT(18) 396 + #define BS_CTRL_PM BIT(20) 397 + 398 + /* BS active size/intervals */ 399 + #define BS_H_INTVALS(hfp, hbp) (((hfp) & 0xFFF) + (((hbp) & 0x3FF) << 16)) 400 + #define BS_V_INTVALS(vfp, vbp) (((vfp) & 0x3FFF) + (((vbp) & 0xFF) << 16)) 401 + 402 + /* BS_SYNC bits */ 403 + #define BS_SYNC_HSW(x) ((x) & 0x3FF) 404 + #define BS_SYNC_HSP BIT(12) 405 + #define BS_SYNC_VSW(x) (((x) & 0xFF) << 16) 406 + #define BS_SYNC_VSP BIT(28) 407 + 408 + #define BS_NUM_INPUT_IDS 0 409 + #define BS_NUM_OUTPUT_IDS 0 410 + 411 + /* Image process registers */ 412 + #define IPS_DEPTH 0x0D8 413 + #define IPS_RGB_RGB_COEFF0 0x130 414 + #define IPS_RGB_YUV_COEFF0 0x170 415 + 416 + #define IPS_DEPTH_MARK 0xF 417 + 418 + /* IPS control register bits */ 419 + #define IPS_CTRL_RGB BIT(0) 420 + #define IPS_CTRL_FT BIT(4) 421 + #define IPS_CTRL_YUV BIT(8) 422 + #define IPS_CTRL_CHD422 BIT(9) 423 + #define IPS_CTRL_CHD420 BIT(10) 424 + #define IPS_CTRL_LPF BIT(11) 425 + #define IPS_CTRL_DITH BIT(12) 426 + #define IPS_CTRL_CLAMP BIT(16) 427 + #define IPS_CTRL_SBS BIT(17) 428 + 429 + /* IPS info register bits */ 430 + #define IPS_INFO_CHD420 BIT(10) 431 + 432 + #define IPS_NUM_INPUT_IDS 2 433 + #define IPS_NUM_OUTPUT_IDS 1 434 + 435 + /* FT_COEFF block registers */ 436 + #define FT_COEFF0 0x80 437 + #define GLB_IT_COEFF 0x80 438 + 439 + /* GLB_SC_COEFF registers */ 440 + #define GLB_SC_COEFF_ADDR 0x0080 441 + #define GLB_SC_COEFF_DATA 0x0084 442 + #define GLB_LT_COEFF_DATA 0x0080 443 + 444 + #define GLB_SC_COEFF_MAX_NUM 1024 445 + #define GLB_LT_COEFF_NUM 65 446 + /* GLB_SC_ADDR */ 447 + #define SC_COEFF_R_ADDR BIT(18) 448 + #define SC_COEFF_G_ADDR BIT(17) 449 + 
#define SC_COEFF_B_ADDR BIT(16) 450 + 451 + #define SC_COEFF_DATA(x, y) (((y) & 0xFFFF) | (((x) & 0xFFFF) << 16)) 452 + 453 + enum d71_blk_type { 454 + D71_BLK_TYPE_GCU = 0x00, 455 + D71_BLK_TYPE_LPU = 0x01, 456 + D71_BLK_TYPE_CU = 0x02, 457 + D71_BLK_TYPE_DOU = 0x03, 458 + D71_BLK_TYPE_AEU = 0x04, 459 + D71_BLK_TYPE_GLB_LT_COEFF = 0x05, 460 + D71_BLK_TYPE_GLB_SCL_COEFF = 0x06, /* SH/SV scaler coeff */ 461 + D71_BLK_TYPE_GLB_SC_COEFF = 0x07, 462 + D71_BLK_TYPE_PERIPH = 0x08, 463 + D71_BLK_TYPE_LPU_TRUSTED = 0x09, 464 + D71_BLK_TYPE_AEU_TRUSTED = 0x0A, 465 + D71_BLK_TYPE_LPU_LAYER = 0x10, 466 + D71_BLK_TYPE_LPU_WB_LAYER = 0x11, 467 + D71_BLK_TYPE_CU_SPLITTER = 0x20, 468 + D71_BLK_TYPE_CU_SCALER = 0x21, 469 + D71_BLK_TYPE_CU_MERGER = 0x22, 470 + D71_BLK_TYPE_DOU_IPS = 0x30, 471 + D71_BLK_TYPE_DOU_BS = 0x31, 472 + D71_BLK_TYPE_DOU_FT_COEFF = 0x32, 473 + D71_BLK_TYPE_AEU_DS = 0x40, 474 + D71_BLK_TYPE_AEU_AES = 0x41, 475 + D71_BLK_TYPE_RESERVED = 0xFF 476 + }; 477 + 478 + /* Constant of components */ 479 + #define D71_MAX_PIPELINE 2 480 + #define D71_PIPELINE_MAX_SCALERS 2 481 + #define D71_PIPELINE_MAX_LAYERS 4 482 + 483 + #define D71_MAX_GLB_IT_COEFF 3 484 + #define D71_MAX_GLB_SCL_COEFF 4 485 + 486 + #define D71_MAX_LAYERS_PER_LPU 4 487 + #define D71_BLOCK_MAX_INPUT 9 488 + #define D71_BLOCK_MAX_OUTPUT 5 489 + #define D71_MAX_SC_PER_CU 2 490 + 491 + #define D71_BLOCK_OFFSET_PERIPH 0xFE00 492 + #define D71_BLOCK_SIZE 0x0200 493 + 494 + #define D71_DEFAULT_PREPRETCH_LINE 5 495 + #define D71_BUS_WIDTH_16_BYTES 16 496 + 497 + #define D71_MIN_LINE_SIZE 64 498 + #define D71_MIN_VERTICAL_SIZE 64 499 + #define D71_SC_MIN_LIN_SIZE 4 500 + #define D71_SC_MIN_VERTICAL_SIZE 4 501 + #define D71_SC_MAX_LIN_SIZE 2048 502 + #define D71_SC_MAX_VERTICAL_SIZE 4096 503 + 504 + #define D71_SC_MAX_UPSCALING 64 505 + #define D71_SC_MAX_DOWNSCALING 6 506 + #define D71_SC_SPLIT_OVERLAP 8 507 + #define D71_SC_ENH_SPLIT_OVERLAP 1 508 + 509 + #define D71_MG_MIN_MERGED_SIZE 4 510 + #define 
D71_MG_MAX_MERGED_HSIZE 4032 511 + #define D71_MG_MAX_MERGED_VSIZE 4096 512 + 513 + #define D71_PALPHA_DEF_MAP 0xFFAA5500 514 + #define D71_LAYER_CONTROL_DEFAULT 0x30000000 515 + #define D71_WB_LAYER_CONTROL_DEFAULT 0x3000FF00 516 + #define D71_BS_CONTROL_DEFAULT 0x00000002 517 + 518 + struct block_header { 519 + u32 block_info; 520 + u32 pipeline_info; 521 + u32 input_ids[D71_BLOCK_MAX_INPUT]; 522 + u32 output_ids[D71_BLOCK_MAX_OUTPUT]; 523 + }; 524 + 525 + static inline u32 get_block_type(struct block_header *blk) 526 + { 527 + return BLOCK_INFO_BLK_TYPE(blk->block_info); 528 + } 529 + 530 + #endif /* !_D71_REG_H_ */
+18
drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
··· 18 18 #include "komeda_dev.h" 19 19 #include "komeda_kms.h" 20 20 21 + void komeda_crtc_handle_event(struct komeda_crtc *kcrtc, 22 + struct komeda_events *evts) 23 + { 24 + struct drm_crtc *crtc = &kcrtc->base; 25 + u32 events = evts->pipes[kcrtc->master->id]; 26 + 27 + if (events & KOMEDA_EVENT_VSYNC) 28 + drm_crtc_handle_vblank(crtc); 29 + 30 + /* will handle it together with the write back support */ 31 + if (events & KOMEDA_EVENT_EOW) 32 + DRM_DEBUG("EOW.\n"); 33 + 34 + /* will handle it with crtc->flush */ 35 + if (events & KOMEDA_EVENT_FLIP) 36 + DRM_DEBUG("FLIP Done.\n"); 37 + } 38 + 21 39 struct drm_crtc_helper_funcs komeda_crtc_helper_funcs = { 22 40 }; 23 41
+66
drivers/gpu/drm/arm/display/komeda/komeda_dev.c
··· 8 8 #include <linux/of_device.h> 9 9 #include <linux/of_graph.h> 10 10 #include <linux/platform_device.h> 11 + #ifdef CONFIG_DEBUG_FS 12 + #include <linux/debugfs.h> 13 + #include <linux/seq_file.h> 14 + #endif 11 15 12 16 #include <drm/drm_print.h> 13 17 14 18 #include "komeda_dev.h" 19 + 20 + static int komeda_register_show(struct seq_file *sf, void *x) 21 + { 22 + struct komeda_dev *mdev = sf->private; 23 + int i; 24 + 25 + if (mdev->funcs->dump_register) 26 + mdev->funcs->dump_register(mdev, sf); 27 + 28 + for (i = 0; i < mdev->n_pipelines; i++) 29 + komeda_pipeline_dump_register(mdev->pipelines[i], sf); 30 + 31 + return 0; 32 + } 33 + 34 + static int komeda_register_open(struct inode *inode, struct file *filp) 35 + { 36 + return single_open(filp, komeda_register_show, inode->i_private); 37 + } 38 + 39 + static const struct file_operations komeda_register_fops = { 40 + .owner = THIS_MODULE, 41 + .open = komeda_register_open, 42 + .read = seq_read, 43 + .llseek = seq_lseek, 44 + .release = single_release, 45 + }; 46 + 47 + #ifdef CONFIG_DEBUG_FS 48 + static void komeda_debugfs_init(struct komeda_dev *mdev) 49 + { 50 + if (!debugfs_initialized()) 51 + return; 52 + 53 + mdev->debugfs_root = debugfs_create_dir("komeda", NULL); 54 + if (IS_ERR_OR_NULL(mdev->debugfs_root)) 55 + return; 56 + 57 + debugfs_create_file("register", 0444, mdev->debugfs_root, 58 + mdev, &komeda_register_fops); 59 + } 60 + #endif 15 61 16 62 static int komeda_parse_pipe_dt(struct komeda_dev *mdev, struct device_node *np) 17 63 { ··· 99 53 100 54 static int komeda_parse_dt(struct device *dev, struct komeda_dev *mdev) 101 55 { 56 + struct platform_device *pdev = to_platform_device(dev); 102 57 struct device_node *child, *np = dev->of_node; 103 58 struct clk *clk; 104 59 int ret; ··· 109 62 return PTR_ERR(clk); 110 63 111 64 mdev->mclk = clk; 65 + mdev->irq = platform_get_irq(pdev, 0); 66 + if (mdev->irq < 0) { 67 + DRM_ERROR("could not get IRQ number.\n"); 68 + return mdev->irq; 69 + } 112 
70 113 71 for_each_available_child_of_node(np, child) { 114 72 if (of_node_cmp(child->name, "pipeline") == 0) { ··· 199 147 goto err_cleanup; 200 148 } 201 149 150 + err = komeda_assemble_pipelines(mdev); 151 + if (err) { 152 + DRM_ERROR("assemble display pipelines failed.\n"); 153 + goto err_cleanup; 154 + } 155 + 156 + #ifdef CONFIG_DEBUG_FS 157 + komeda_debugfs_init(mdev); 158 + #endif 159 + 202 160 return mdev; 203 161 204 162 err_cleanup: ··· 221 159 struct device *dev = mdev->dev; 222 160 struct komeda_dev_funcs *funcs = mdev->funcs; 223 161 int i; 162 + 163 + #ifdef CONFIG_DEBUG_FS 164 + debugfs_remove_recursive(mdev->debugfs_root); 165 + #endif 224 166 225 167 for (i = 0; i < mdev->n_pipelines; i++) { 226 168 komeda_pipeline_destroy(mdev, mdev->pipelines[i]);
+51
drivers/gpu/drm/arm/display/komeda/komeda_dev.h
··· 13 13 #include "malidp_product.h" 14 14 #include "komeda_format_caps.h" 15 15 16 + #define KOMEDA_EVENT_VSYNC BIT_ULL(0) 17 + #define KOMEDA_EVENT_FLIP BIT_ULL(1) 18 + #define KOMEDA_EVENT_URUN BIT_ULL(2) 19 + #define KOMEDA_EVENT_IBSY BIT_ULL(3) 20 + #define KOMEDA_EVENT_OVR BIT_ULL(4) 21 + #define KOMEDA_EVENT_EOW BIT_ULL(5) 22 + #define KOMEDA_EVENT_MODE BIT_ULL(6) 23 + 24 + #define KOMEDA_ERR_TETO BIT_ULL(14) 25 + #define KOMEDA_ERR_TEMR BIT_ULL(15) 26 + #define KOMEDA_ERR_TITR BIT_ULL(16) 27 + #define KOMEDA_ERR_CPE BIT_ULL(17) 28 + #define KOMEDA_ERR_CFGE BIT_ULL(18) 29 + #define KOMEDA_ERR_AXIE BIT_ULL(19) 30 + #define KOMEDA_ERR_ACE0 BIT_ULL(20) 31 + #define KOMEDA_ERR_ACE1 BIT_ULL(21) 32 + #define KOMEDA_ERR_ACE2 BIT_ULL(22) 33 + #define KOMEDA_ERR_ACE3 BIT_ULL(23) 34 + #define KOMEDA_ERR_DRIFTTO BIT_ULL(24) 35 + #define KOMEDA_ERR_FRAMETO BIT_ULL(25) 36 + #define KOMEDA_ERR_CSCE BIT_ULL(26) 37 + #define KOMEDA_ERR_ZME BIT_ULL(27) 38 + #define KOMEDA_ERR_MERR BIT_ULL(28) 39 + #define KOMEDA_ERR_TCF BIT_ULL(29) 40 + #define KOMEDA_ERR_TTNG BIT_ULL(30) 41 + #define KOMEDA_ERR_TTF BIT_ULL(31) 42 + 16 43 /* malidp device id */ 17 44 enum { 18 45 MALI_D71 = 0, ··· 66 39 67 40 struct komeda_dev; 68 41 42 + struct komeda_events { 43 + u64 global; 44 + u64 pipes[KOMEDA_MAX_PIPELINES]; 45 + }; 46 + 69 47 /** 70 48 * struct komeda_dev_funcs 71 49 * ··· 92 60 int (*enum_resources)(struct komeda_dev *mdev); 93 61 /** @cleanup: call to chip to cleanup komeda_dev->chip data */ 94 62 void (*cleanup)(struct komeda_dev *mdev); 63 + /** 64 + * @irq_handler: 65 + * 66 + * for CORE to get the HW event from the CHIP when interrupt happened. 
67 + */ 68 + irqreturn_t (*irq_handler)(struct komeda_dev *mdev, 69 + struct komeda_events *events); 70 + /** @enable_irq: enable irq */ 71 + int (*enable_irq)(struct komeda_dev *mdev); 72 + /** @disable_irq: disable irq */ 73 + int (*disable_irq)(struct komeda_dev *mdev); 74 + 75 + /** @dump_register: Optional, dump registers to seq_file */ 76 + void (*dump_register)(struct komeda_dev *mdev, struct seq_file *seq); 95 77 }; 96 78 97 79 /** ··· 127 81 /** @mck: HW main engine clk */ 128 82 struct clk *mclk; 129 83 84 + /** @irq: irq number */ 85 + int irq; 86 + 130 87 int n_pipelines; 131 88 struct komeda_pipeline *pipelines[KOMEDA_MAX_PIPELINES]; 132 89 ··· 142 93 * destroyed by &komeda_dev_funcs.cleanup() 143 94 */ 144 95 void *chip_data; 96 + 97 + struct dentry *debugfs_root; 145 98 }; 146 99 147 100 static inline bool
+36 -2
drivers/gpu/drm/arm/display/komeda/komeda_kms.c
··· 13 13 #include <drm/drm_fb_helper.h> 14 14 #include <drm/drm_gem_cma_helper.h> 15 15 #include <drm/drm_gem_framebuffer_helper.h> 16 + #include <drm/drm_irq.h> 16 17 #include <drm/drm_vblank.h> 17 18 18 19 #include "komeda_dev.h" ··· 34 33 return drm_gem_cma_dumb_create_internal(file, dev, args); 35 34 } 36 35 36 + static irqreturn_t komeda_kms_irq_handler(int irq, void *data) 37 + { 38 + struct drm_device *drm = data; 39 + struct komeda_dev *mdev = drm->dev_private; 40 + struct komeda_kms_dev *kms = to_kdev(drm); 41 + struct komeda_events evts; 42 + irqreturn_t status; 43 + u32 i; 44 + 45 + /* Call into the CHIP to recognize events */ 46 + memset(&evts, 0, sizeof(evts)); 47 + status = mdev->funcs->irq_handler(mdev, &evts); 48 + 49 + /* Notify the crtc to handle the events */ 50 + for (i = 0; i < kms->n_crtcs; i++) 51 + komeda_crtc_handle_event(&kms->crtcs[i], &evts); 52 + 53 + return status; 54 + } 55 + 37 56 static struct drm_driver komeda_kms_driver = { 38 57 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC | 39 - DRIVER_PRIME, 58 + DRIVER_PRIME | DRIVER_HAVE_IRQ, 40 59 .lastclose = drm_fb_helper_lastclose, 60 + .irq_handler = komeda_kms_irq_handler, 41 61 .gem_free_object_unlocked = drm_gem_cma_free_object, 42 62 .gem_vm_ops = &drm_gem_cma_vm_ops, 43 63 .dumb_create = komeda_gem_cma_dumb_create, ··· 166 144 167 145 drm_mode_config_reset(drm); 168 146 169 - err = drm_dev_register(drm, 0); 147 + err = drm_irq_install(drm, mdev->irq); 170 148 if (err) 171 149 goto cleanup_mode_config; 172 150 151 + err = mdev->funcs->enable_irq(mdev); 152 + if (err) 153 + goto uninstall_irq; 154 + 155 + err = drm_dev_register(drm, 0); 156 + if (err) 157 + goto uninstall_irq; 158 + 173 159 return kms; 174 160 161 + uninstall_irq: 162 + drm_irq_uninstall(drm); 175 163 cleanup_mode_config: 176 164 drm_mode_config_cleanup(drm); 177 165 free_kms: ··· 194 162 struct drm_device *drm = &kms->base; 195 163 struct komeda_dev *mdev = drm->dev_private; 196 164 165 + 
mdev->funcs->disable_irq(mdev); 197 166 drm_dev_unregister(drm); 167 + drm_irq_uninstall(drm); 198 168 component_unbind_all(mdev->dev, drm); 199 169 komeda_kms_cleanup_private_objs(mdev); 200 170 drm_mode_config_cleanup(drm);
+5
drivers/gpu/drm/arm/display/komeda/komeda_kms.h
··· 12 12 #include <drm/drm_crtc_helper.h> 13 13 #include <drm/drm_device.h> 14 14 #include <drm/drm_writeback.h> 15 + #include <video/videomode.h> 16 + #include <video/display_timing.h> 15 17 16 18 /** struct komeda_plane - komeda instance of drm_plane */ 17 19 struct komeda_plane { ··· 109 107 int komeda_kms_add_private_objs(struct komeda_kms_dev *kms, 110 108 struct komeda_dev *mdev); 111 109 void komeda_kms_cleanup_private_objs(struct komeda_dev *mdev); 110 + 111 + void komeda_crtc_handle_event(struct komeda_crtc *kcrtc, 112 + struct komeda_events *evts); 112 113 113 114 struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev); 114 115 void komeda_kms_detach(struct komeda_kms_dev *kms);
+103 -8
drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
··· 19 19 if (mdev->n_pipelines + 1 > KOMEDA_MAX_PIPELINES) { 20 20 DRM_ERROR("Exceed max support %d pipelines.\n", 21 21 KOMEDA_MAX_PIPELINES); 22 - return NULL; 22 + return ERR_PTR(-ENOSPC); 23 23 } 24 24 25 25 if (size < sizeof(*pipe)) { 26 26 DRM_ERROR("Request pipeline size too small.\n"); 27 - return NULL; 27 + return ERR_PTR(-EINVAL); 28 28 } 29 29 30 30 pipe = devm_kzalloc(mdev->dev, size, GFP_KERNEL); 31 31 if (!pipe) 32 - return NULL; 32 + return ERR_PTR(-ENOMEM); 33 33 34 34 pipe->mdev = mdev; 35 35 pipe->id = mdev->n_pipelines; ··· 142 142 if (max_active_inputs > KOMEDA_COMPONENT_N_INPUTS) { 143 143 WARN(1, "please large KOMEDA_COMPONENT_N_INPUTS to %d.\n", 144 144 max_active_inputs); 145 - return NULL; 145 + return ERR_PTR(-ENOSPC); 146 146 } 147 147 148 148 pos = komeda_pipeline_get_component_pos(pipe, id); 149 149 if (!pos || (*pos)) 150 - return NULL; 150 + return ERR_PTR(-EINVAL); 151 151 152 152 if (has_bit(id, KOMEDA_PIPELINE_LAYERS)) { 153 153 idx = id - KOMEDA_COMPONENT_LAYER0; 154 154 num = &pipe->n_layers; 155 155 if (idx != pipe->n_layers) { 156 156 DRM_ERROR("please add Layer by id sequence.\n"); 157 - return NULL; 157 + return ERR_PTR(-EINVAL); 158 158 } 159 159 } else if (has_bit(id, KOMEDA_PIPELINE_SCALERS)) { 160 160 idx = id - KOMEDA_COMPONENT_SCALER0; 161 161 num = &pipe->n_scalers; 162 162 if (idx != pipe->n_scalers) { 163 163 DRM_ERROR("please add Scaler by id sequence.\n"); 164 - return NULL; 164 + return ERR_PTR(-EINVAL); 165 165 } 166 166 } 167 167 168 168 c = devm_kzalloc(pipe->mdev->dev, comp_sz, GFP_KERNEL); 169 169 if (!c) 170 - return NULL; 170 + return ERR_PTR(-ENOMEM); 171 171 172 172 c->id = id; 173 173 c->hw_id = hw_id; ··· 199 199 struct komeda_component *c) 200 200 { 201 201 devm_kfree(mdev->dev, c); 202 + } 203 + 204 + static void komeda_component_dump(struct komeda_component *c) 205 + { 206 + if (!c) 207 + return; 208 + 209 + DRM_DEBUG(" %s: ID %d-0x%08lx.\n", 210 + c->name, c->id, BIT(c->id)); 211 + DRM_DEBUG(" max_active_inputs:%d, supported_inputs: 0x%08x.\n", 212 + c->max_active_inputs, c->supported_inputs); 213 + DRM_DEBUG(" max_active_outputs:%d, supported_outputs: 0x%08x.\n", 214 + c->max_active_outputs, c->supported_outputs); 215 + } 216 + 217 + static void komeda_pipeline_dump(struct komeda_pipeline *pipe) 218 + { 219 + struct komeda_component *c; 220 + int id; 221 + 222 + DRM_INFO("Pipeline-%d: n_layers: %d, n_scalers: %d, output: %s\n", 223 + pipe->id, pipe->n_layers, pipe->n_scalers, 224 + pipe->of_output_dev ? pipe->of_output_dev->full_name : "none"); 225 + 226 + dp_for_each_set_bit(id, pipe->avail_comps) { 227 + c = komeda_pipeline_get_component(pipe, id); 228 + 229 + komeda_component_dump(c); 230 + } 231 + } 232 + 233 + static void komeda_component_verify_inputs(struct komeda_component *c) 234 + { 235 + struct komeda_pipeline *pipe = c->pipeline; 236 + struct komeda_component *input; 237 + int id; 238 + 239 + dp_for_each_set_bit(id, c->supported_inputs) { 240 + input = komeda_pipeline_get_component(pipe, id); 241 + if (!input) { 242 + c->supported_inputs &= ~(BIT(id)); 243 + DRM_WARN("Can not find input(ID-%d) for component: %s.\n", 244 + id, c->name); 245 + continue; 246 + } 247 + 248 + input->supported_outputs |= BIT(c->id); 249 + } 250 + } 251 + 252 + static void komeda_pipeline_assemble(struct komeda_pipeline *pipe) 253 + { 254 + struct komeda_component *c; 255 + int id; 256 + 257 + dp_for_each_set_bit(id, pipe->avail_comps) { 258 + c = komeda_pipeline_get_component(pipe, id); 259 + 260 + komeda_component_verify_inputs(c); 261 + } 262 + } 263 + 264 + int komeda_assemble_pipelines(struct komeda_dev *mdev) 265 + { 266 + struct komeda_pipeline *pipe; 267 + int i; 268 + 269 + for (i = 0; i < mdev->n_pipelines; i++) { 270 + pipe = mdev->pipelines[i]; 271 + 272 + komeda_pipeline_assemble(pipe); 273 + komeda_pipeline_dump(pipe); 274 + } 275 + 276 + return 0; 277 + } 278 + 279 + void komeda_pipeline_dump_register(struct komeda_pipeline *pipe, 280 + struct seq_file *sf) 281 + { 282 + struct komeda_component *c; 283 + u32 id; 284 + 285 + seq_printf(sf, "\n======== Pipeline-%d ==========\n", pipe->id); 286 + 287 + if (pipe->funcs && pipe->funcs->dump_register) 288 + pipe->funcs->dump_register(pipe, sf); 289 + 290 + dp_for_each_set_bit(id, pipe->avail_comps) { 291 + c = komeda_pipeline_get_component(pipe, id); 292 + 293 + seq_printf(sf, "\n------%s------\n", c->name); 294 + if (c->funcs->dump_register) 295 + c->funcs->dump_register(c, sf); 296 + } 202 297 }
+40 -14
drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
··· 204 204 return component_disabling_inputs(st) | st->changed_active_inputs; 205 205 } 206 206 207 + #define for_each_changed_input(st, i) \ 208 + for ((i) = 0; (i) < (st)->component->max_active_inputs; (i)++) \ 209 + if (has_bit((i), component_changed_inputs(st))) 210 + 207 211 #define to_comp(__c) (((__c) == NULL) ? NULL : &((__c)->base)) 208 212 #define to_cpos(__c) ((struct komeda_component **)&(__c)) 209 213 210 - /* these structures are going to be filled in in uture patches */ 211 214 struct komeda_layer { 212 215 struct komeda_component base; 213 - /* layer specific features and caps */ 214 - int layer_type; /* RICH, SIMPLE or WB */ 216 + /* accepted h/v input range before rotation */ 217 + struct malidp_range hsize_in, vsize_in; 218 + u32 layer_type; /* RICH, SIMPLE or WB */ 219 + u32 supported_rots; 215 220 }; 216 221 217 222 struct komeda_layer_state { 218 223 struct komeda_component_state base; 219 224 /* layer specific configuration state */ 220 - }; 221 - 222 - struct komeda_compiz { 223 - struct komeda_component base; 224 - /* compiz specific features and caps */ 225 - }; 226 - 227 - struct komeda_compiz_state { 228 - struct komeda_component_state base; 229 - /* compiz specific configuration state */ 225 + u16 hsize, vsize; 226 + u32 rot; 227 + dma_addr_t addr[3]; 230 228 }; 231 229 232 230 struct komeda_scaler { ··· 236 238 struct komeda_component_state base; 237 239 }; 238 240 241 + struct komeda_compiz { 242 + struct komeda_component base; 243 + struct malidp_range hsize, vsize; 244 + }; 245 + 246 + struct komeda_compiz_input_cfg { 247 + u16 hsize, vsize; 248 + u16 hoffset, voffset; 249 + u8 pixel_blend_mode, layer_alpha; 250 + }; 251 + 252 + struct komeda_compiz_state { 253 + struct komeda_component_state base; 254 + /* composition size */ 255 + u16 hsize, vsize; 256 + struct komeda_compiz_input_cfg cins[KOMEDA_COMPONENT_N_INPUTS]; 257 + }; 258 + 239 259 struct komeda_improc { 240 260 struct komeda_component base; 261 + u32 
supported_color_formats; /* DRM_RGB/YUV444/YUV420*/ 262 + u32 supported_color_depths; /* BIT(8) | BIT(10)*/ 263 + u8 supports_degamma : 1; 264 + u8 supports_csc : 1; 265 + u8 supports_gamma : 1; 241 266 }; 242 267 243 268 struct komeda_improc_state { 244 269 struct komeda_component_state base; 270 + u16 hsize, vsize; 245 271 }; 246 272 247 273 /* display timing controller */ 248 274 struct komeda_timing_ctrlr { 249 275 struct komeda_component base; 276 + u8 supports_dual_link : 1; 250 277 }; 251 278 252 279 struct komeda_timing_ctrlr_state { ··· 363 340 struct komeda_pipeline_funcs *funcs); 364 341 void komeda_pipeline_destroy(struct komeda_dev *mdev, 365 342 struct komeda_pipeline *pipe); 366 - 343 + int komeda_assemble_pipelines(struct komeda_dev *mdev); 367 344 struct komeda_component * 368 345 komeda_pipeline_get_component(struct komeda_pipeline *pipe, int id); 346 + 347 + void komeda_pipeline_dump_register(struct komeda_pipeline *pipe, 348 + struct seq_file *sf); 369 349 370 350 /* component APIs */ 371 351 struct komeda_component *