Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'mediatek-drm-next-5.6' of https://github.com/ckhu-mediatek/linux.git-tags into drm-next

Mediatek DRM Next for Linux 5.6

This fixes the non-smooth cursor problem, adds cmdq support, adds ctm property
support, and includes some refinements.

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: CK Hu <ck.hu@mediatek.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1578972526.14594.8.camel@mtksdaap41

+665 -260
+1 -1
drivers/gpu/drm/mediatek/Makefile
··· 20 20 mediatek-drm-hdmi-objs := mtk_cec.o \ 21 21 mtk_hdmi.o \ 22 22 mtk_hdmi_ddc.o \ 23 - mtk_mt2701_hdmi_phy.o \ 23 + mtk_mt2701_hdmi_phy.o \ 24 24 mtk_mt8173_hdmi_phy.o \ 25 25 mtk_hdmi_phy.o 26 26
+4 -3
drivers/gpu/drm/mediatek/mtk_disp_color.c
··· 9 9 #include <linux/of_device.h> 10 10 #include <linux/of_irq.h> 11 11 #include <linux/platform_device.h> 12 + #include <linux/soc/mediatek/mtk-cmdq.h> 12 13 13 14 #include "mtk_drm_crtc.h" 14 15 #include "mtk_drm_ddp_comp.h" ··· 46 45 47 46 static void mtk_color_config(struct mtk_ddp_comp *comp, unsigned int w, 48 47 unsigned int h, unsigned int vrefresh, 49 - unsigned int bpc) 48 + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) 50 49 { 51 50 struct mtk_disp_color *color = comp_to_color(comp); 52 51 53 - writel(w, comp->regs + DISP_COLOR_WIDTH(color)); 54 - writel(h, comp->regs + DISP_COLOR_HEIGHT(color)); 52 + mtk_ddp_write(cmdq_pkt, w, comp, DISP_COLOR_WIDTH(color)); 53 + mtk_ddp_write(cmdq_pkt, h, comp, DISP_COLOR_HEIGHT(color)); 55 54 } 56 55 57 56 static void mtk_color_start(struct mtk_ddp_comp *comp)
+39 -35
drivers/gpu/drm/mediatek/mtk_disp_ovl.c
··· 11 11 #include <linux/of_device.h> 12 12 #include <linux/of_irq.h> 13 13 #include <linux/platform_device.h> 14 + #include <linux/soc/mediatek/mtk-cmdq.h> 14 15 15 16 #include "mtk_drm_crtc.h" 16 17 #include "mtk_drm_ddp_comp.h" ··· 125 124 126 125 static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w, 127 126 unsigned int h, unsigned int vrefresh, 128 - unsigned int bpc) 127 + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) 129 128 { 130 129 if (w != 0 && h != 0) 131 - writel_relaxed(h << 16 | w, comp->regs + DISP_REG_OVL_ROI_SIZE); 132 - writel_relaxed(0x0, comp->regs + DISP_REG_OVL_ROI_BGCLR); 130 + mtk_ddp_write_relaxed(cmdq_pkt, h << 16 | w, comp, 131 + DISP_REG_OVL_ROI_SIZE); 132 + mtk_ddp_write_relaxed(cmdq_pkt, 0x0, comp, DISP_REG_OVL_ROI_BGCLR); 133 133 134 - writel(0x1, comp->regs + DISP_REG_OVL_RST); 135 - writel(0x0, comp->regs + DISP_REG_OVL_RST); 134 + mtk_ddp_write(cmdq_pkt, 0x1, comp, DISP_REG_OVL_RST); 135 + mtk_ddp_write(cmdq_pkt, 0x0, comp, DISP_REG_OVL_RST); 136 136 } 137 137 138 138 static unsigned int mtk_ovl_layer_nr(struct mtk_ddp_comp *comp) ··· 177 175 return 0; 178 176 } 179 177 180 - static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx) 178 + static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx, 179 + struct cmdq_pkt *cmdq_pkt) 181 180 { 182 - unsigned int reg; 183 181 unsigned int gmc_thrshd_l; 184 182 unsigned int gmc_thrshd_h; 185 183 unsigned int gmc_value; 186 184 struct mtk_disp_ovl *ovl = comp_to_ovl(comp); 187 185 188 - writel(0x1, comp->regs + DISP_REG_OVL_RDMA_CTRL(idx)); 189 - 186 + mtk_ddp_write(cmdq_pkt, 0x1, comp, 187 + DISP_REG_OVL_RDMA_CTRL(idx)); 190 188 gmc_thrshd_l = GMC_THRESHOLD_LOW >> 191 189 (GMC_THRESHOLD_BITS - ovl->data->gmc_bits); 192 190 gmc_thrshd_h = GMC_THRESHOLD_HIGH >> ··· 196 194 else 197 195 gmc_value = gmc_thrshd_l | gmc_thrshd_l << 8 | 198 196 gmc_thrshd_h << 16 | gmc_thrshd_h << 24; 199 - writel(gmc_value, comp->regs + 
DISP_REG_OVL_RDMA_GMC(idx)); 200 - 201 - reg = readl(comp->regs + DISP_REG_OVL_SRC_CON); 202 - reg = reg | BIT(idx); 203 - writel(reg, comp->regs + DISP_REG_OVL_SRC_CON); 197 + mtk_ddp_write(cmdq_pkt, gmc_value, 198 + comp, DISP_REG_OVL_RDMA_GMC(idx)); 199 + mtk_ddp_write_mask(cmdq_pkt, BIT(idx), comp, 200 + DISP_REG_OVL_SRC_CON, BIT(idx)); 204 201 } 205 202 206 - static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx) 203 + static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx, 204 + struct cmdq_pkt *cmdq_pkt) 207 205 { 208 - unsigned int reg; 209 - 210 - reg = readl(comp->regs + DISP_REG_OVL_SRC_CON); 211 - reg = reg & ~BIT(idx); 212 - writel(reg, comp->regs + DISP_REG_OVL_SRC_CON); 213 - 214 - writel(0x0, comp->regs + DISP_REG_OVL_RDMA_CTRL(idx)); 206 + mtk_ddp_write_mask(cmdq_pkt, 0, comp, 207 + DISP_REG_OVL_SRC_CON, BIT(idx)); 208 + mtk_ddp_write(cmdq_pkt, 0, comp, 209 + DISP_REG_OVL_RDMA_CTRL(idx)); 215 210 } 216 211 217 212 static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt) ··· 248 249 } 249 250 250 251 static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, 251 - struct mtk_plane_state *state) 252 + struct mtk_plane_state *state, 253 + struct cmdq_pkt *cmdq_pkt) 252 254 { 253 255 struct mtk_disp_ovl *ovl = comp_to_ovl(comp); 254 256 struct mtk_plane_pending_state *pending = &state->pending; ··· 260 260 unsigned int src_size = (pending->height << 16) | pending->width; 261 261 unsigned int con; 262 262 263 - if (!pending->enable) 264 - mtk_ovl_layer_off(comp, idx); 263 + if (!pending->enable) { 264 + mtk_ovl_layer_off(comp, idx, cmdq_pkt); 265 + return; 266 + } 265 267 266 268 con = ovl_fmt_convert(ovl, fmt); 267 - if (idx != 0) 269 + if (state->base.fb->format->has_alpha) 268 270 con |= OVL_CON_AEN | OVL_CON_ALPHA; 269 271 270 272 if (pending->rotation & DRM_MODE_REFLECT_Y) { ··· 279 277 addr += pending->pitch - 1; 280 278 } 281 279 282 - writel_relaxed(con, comp->regs 
+ DISP_REG_OVL_CON(idx)); 283 - writel_relaxed(pitch, comp->regs + DISP_REG_OVL_PITCH(idx)); 284 - writel_relaxed(src_size, comp->regs + DISP_REG_OVL_SRC_SIZE(idx)); 285 - writel_relaxed(offset, comp->regs + DISP_REG_OVL_OFFSET(idx)); 286 - writel_relaxed(addr, comp->regs + DISP_REG_OVL_ADDR(ovl, idx)); 280 + mtk_ddp_write_relaxed(cmdq_pkt, con, comp, 281 + DISP_REG_OVL_CON(idx)); 282 + mtk_ddp_write_relaxed(cmdq_pkt, pitch, comp, 283 + DISP_REG_OVL_PITCH(idx)); 284 + mtk_ddp_write_relaxed(cmdq_pkt, src_size, comp, 285 + DISP_REG_OVL_SRC_SIZE(idx)); 286 + mtk_ddp_write_relaxed(cmdq_pkt, offset, comp, 287 + DISP_REG_OVL_OFFSET(idx)); 288 + mtk_ddp_write_relaxed(cmdq_pkt, addr, comp, 289 + DISP_REG_OVL_ADDR(ovl, idx)); 287 290 288 - if (pending->enable) 289 - mtk_ovl_layer_on(comp, idx); 291 + mtk_ovl_layer_on(comp, idx, cmdq_pkt); 290 292 } 291 293 292 294 static void mtk_ovl_bgclr_in_on(struct mtk_ddp_comp *comp) ··· 319 313 .disable_vblank = mtk_ovl_disable_vblank, 320 314 .supported_rotations = mtk_ovl_supported_rotations, 321 315 .layer_nr = mtk_ovl_layer_nr, 322 - .layer_on = mtk_ovl_layer_on, 323 - .layer_off = mtk_ovl_layer_off, 324 316 .layer_check = mtk_ovl_layer_check, 325 317 .layer_config = mtk_ovl_layer_config, 326 318 .bgclr_in_on = mtk_ovl_bgclr_in_on,
+25 -18
drivers/gpu/drm/mediatek/mtk_disp_rdma.c
··· 9 9 #include <linux/of_device.h> 10 10 #include <linux/of_irq.h> 11 11 #include <linux/platform_device.h> 12 + #include <linux/soc/mediatek/mtk-cmdq.h> 12 13 13 14 #include "mtk_drm_crtc.h" 14 15 #include "mtk_drm_ddp_comp.h" ··· 126 125 127 126 static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width, 128 127 unsigned int height, unsigned int vrefresh, 129 - unsigned int bpc) 128 + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) 130 129 { 131 130 unsigned int threshold; 132 131 unsigned int reg; 133 132 struct mtk_disp_rdma *rdma = comp_to_rdma(comp); 134 133 135 - rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, 0xfff, width); 136 - rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_1, 0xfffff, height); 134 + mtk_ddp_write_mask(cmdq_pkt, width, comp, 135 + DISP_REG_RDMA_SIZE_CON_0, 0xfff); 136 + mtk_ddp_write_mask(cmdq_pkt, height, comp, 137 + DISP_REG_RDMA_SIZE_CON_1, 0xfffff); 137 138 138 139 /* 139 140 * Enable FIFO underflow since DSI and DPI can't be blocked. ··· 147 144 reg = RDMA_FIFO_UNDERFLOW_EN | 148 145 RDMA_FIFO_PSEUDO_SIZE(RDMA_FIFO_SIZE(rdma)) | 149 146 RDMA_OUTPUT_VALID_FIFO_THRESHOLD(threshold); 150 - writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON); 147 + mtk_ddp_write(cmdq_pkt, reg, comp, DISP_REG_RDMA_FIFO_CON); 151 148 } 152 149 153 150 static unsigned int rdma_fmt_convert(struct mtk_disp_rdma *rdma, ··· 193 190 } 194 191 195 192 static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, 196 - struct mtk_plane_state *state) 193 + struct mtk_plane_state *state, 194 + struct cmdq_pkt *cmdq_pkt) 197 195 { 198 196 struct mtk_disp_rdma *rdma = comp_to_rdma(comp); 199 197 struct mtk_plane_pending_state *pending = &state->pending; ··· 204 200 unsigned int con; 205 201 206 202 con = rdma_fmt_convert(rdma, fmt); 207 - writel_relaxed(con, comp->regs + DISP_RDMA_MEM_CON); 203 + mtk_ddp_write_relaxed(cmdq_pkt, con, comp, DISP_RDMA_MEM_CON); 208 204 209 205 if (fmt == DRM_FORMAT_UYVY || fmt == DRM_FORMAT_YUYV) { 210 - 
rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, 211 - RDMA_MATRIX_ENABLE, RDMA_MATRIX_ENABLE); 212 - rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, 213 - RDMA_MATRIX_INT_MTX_SEL, 214 - RDMA_MATRIX_INT_MTX_BT601_to_RGB); 206 + mtk_ddp_write_mask(cmdq_pkt, RDMA_MATRIX_ENABLE, comp, 207 + DISP_REG_RDMA_SIZE_CON_0, 208 + RDMA_MATRIX_ENABLE); 209 + mtk_ddp_write_mask(cmdq_pkt, RDMA_MATRIX_INT_MTX_BT601_to_RGB, 210 + comp, DISP_REG_RDMA_SIZE_CON_0, 211 + RDMA_MATRIX_INT_MTX_SEL); 215 212 } else { 216 - rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, 217 - RDMA_MATRIX_ENABLE, 0); 213 + mtk_ddp_write_mask(cmdq_pkt, 0, comp, 214 + DISP_REG_RDMA_SIZE_CON_0, 215 + RDMA_MATRIX_ENABLE); 218 216 } 217 + mtk_ddp_write_relaxed(cmdq_pkt, addr, comp, DISP_RDMA_MEM_START_ADDR); 218 + mtk_ddp_write_relaxed(cmdq_pkt, pitch, comp, DISP_RDMA_MEM_SRC_PITCH); 219 + mtk_ddp_write(cmdq_pkt, RDMA_MEM_GMC, comp, 220 + DISP_RDMA_MEM_GMC_SETTING_0); 221 + mtk_ddp_write_mask(cmdq_pkt, RDMA_MODE_MEMORY, comp, 222 + DISP_REG_RDMA_GLOBAL_CON, RDMA_MODE_MEMORY); 219 223 220 - writel_relaxed(addr, comp->regs + DISP_RDMA_MEM_START_ADDR); 221 - writel_relaxed(pitch, comp->regs + DISP_RDMA_MEM_SRC_PITCH); 222 - writel(RDMA_MEM_GMC, comp->regs + DISP_RDMA_MEM_GMC_SETTING_0); 223 - rdma_update_bits(comp, DISP_REG_RDMA_GLOBAL_CON, 224 - RDMA_MODE_MEMORY, RDMA_MODE_MEMORY); 225 224 } 226 225 227 226 static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = {
+169 -35
drivers/gpu/drm/mediatek/mtk_drm_crtc.c
··· 5 5 6 6 #include <linux/clk.h> 7 7 #include <linux/pm_runtime.h> 8 + #include <linux/soc/mediatek/mtk-cmdq.h> 8 9 9 10 #include <asm/barrier.h> 10 11 #include <soc/mediatek/smi.h> ··· 43 42 struct drm_plane *planes; 44 43 unsigned int layer_nr; 45 44 bool pending_planes; 45 + bool pending_async_planes; 46 + 47 + #if IS_REACHABLE(CONFIG_MTK_CMDQ) 48 + struct cmdq_client *cmdq_client; 49 + u32 cmdq_event; 50 + #endif 46 51 47 52 void __iomem *config_regs; 48 53 struct mtk_disp_mutex *mutex; 49 54 unsigned int ddp_comp_nr; 50 55 struct mtk_ddp_comp **ddp_comp; 56 + 57 + /* lock for display hardware access */ 58 + struct mutex hw_lock; 51 59 }; 52 60 53 61 struct mtk_crtc_state { ··· 225 215 struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); 226 216 struct mtk_ddp_comp *comp; 227 217 int i, count = 0; 218 + unsigned int local_index = plane - mtk_crtc->planes; 228 219 229 220 for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { 230 221 comp = mtk_crtc->ddp_comp[i]; 231 - if (plane->index < (count + mtk_ddp_comp_layer_nr(comp))) { 232 - *local_layer = plane->index - count; 222 + if (local_index < (count + mtk_ddp_comp_layer_nr(comp))) { 223 + *local_layer = local_index - count; 233 224 return comp; 234 225 } 235 226 count += mtk_ddp_comp_layer_nr(comp); ··· 239 228 WARN(1, "Failed to find component for plane %d\n", plane->index); 240 229 return NULL; 241 230 } 231 + 232 + #if IS_REACHABLE(CONFIG_MTK_CMDQ) 233 + static void ddp_cmdq_cb(struct cmdq_cb_data data) 234 + { 235 + cmdq_pkt_destroy(data.data); 236 + } 237 + #endif 242 238 243 239 static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) 244 240 { ··· 315 297 if (i == 1) 316 298 mtk_ddp_comp_bgclr_in_on(comp); 317 299 318 - mtk_ddp_comp_config(comp, width, height, vrefresh, bpc); 300 + mtk_ddp_comp_config(comp, width, height, vrefresh, bpc, NULL); 319 301 mtk_ddp_comp_start(comp); 320 302 } 321 303 ··· 328 310 329 311 plane_state = to_mtk_plane_state(plane->state); 330 312 comp = 
mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer); 331 - mtk_ddp_comp_layer_config(comp, local_layer, plane_state); 313 + if (comp) 314 + mtk_ddp_comp_layer_config(comp, local_layer, 315 + plane_state, NULL); 332 316 } 333 317 334 318 return 0; ··· 345 325 static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc) 346 326 { 347 327 struct drm_device *drm = mtk_crtc->base.dev; 328 + struct drm_crtc *crtc = &mtk_crtc->base; 348 329 int i; 349 330 350 331 DRM_DEBUG_DRIVER("%s\n", __func__); ··· 371 350 mtk_disp_mutex_unprepare(mtk_crtc->mutex); 372 351 373 352 pm_runtime_put(drm->dev); 353 + 354 + if (crtc->state->event && !crtc->state->active) { 355 + spin_lock_irq(&crtc->dev->event_lock); 356 + drm_crtc_send_vblank_event(crtc, crtc->state->event); 357 + crtc->state->event = NULL; 358 + spin_unlock_irq(&crtc->dev->event_lock); 359 + } 374 360 } 375 361 376 - static void mtk_crtc_ddp_config(struct drm_crtc *crtc) 362 + static void mtk_crtc_ddp_config(struct drm_crtc *crtc, 363 + struct cmdq_pkt *cmdq_handle) 377 364 { 378 365 struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); 379 366 struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state); ··· 397 368 if (state->pending_config) { 398 369 mtk_ddp_comp_config(comp, state->pending_width, 399 370 state->pending_height, 400 - state->pending_vrefresh, 0); 371 + state->pending_vrefresh, 0, 372 + cmdq_handle); 401 373 402 374 state->pending_config = false; 403 375 } ··· 416 386 comp = mtk_drm_ddp_comp_for_plane(crtc, plane, 417 387 &local_layer); 418 388 419 - mtk_ddp_comp_layer_config(comp, local_layer, 420 - plane_state); 389 + if (comp) 390 + mtk_ddp_comp_layer_config(comp, local_layer, 391 + plane_state, 392 + cmdq_handle); 421 393 plane_state->pending.config = false; 422 394 } 423 395 mtk_crtc->pending_planes = false; 424 396 } 397 + 398 + if (mtk_crtc->pending_async_planes) { 399 + for (i = 0; i < mtk_crtc->layer_nr; i++) { 400 + struct drm_plane *plane = &mtk_crtc->planes[i]; 401 + struct 
mtk_plane_state *plane_state; 402 + 403 + plane_state = to_mtk_plane_state(plane->state); 404 + 405 + if (!plane_state->pending.async_config) 406 + continue; 407 + 408 + comp = mtk_drm_ddp_comp_for_plane(crtc, plane, 409 + &local_layer); 410 + 411 + if (comp) 412 + mtk_ddp_comp_layer_config(comp, local_layer, 413 + plane_state, 414 + cmdq_handle); 415 + plane_state->pending.async_config = false; 416 + } 417 + mtk_crtc->pending_async_planes = false; 418 + } 419 + } 420 + 421 + static void mtk_drm_crtc_hw_config(struct mtk_drm_crtc *mtk_crtc) 422 + { 423 + #if IS_REACHABLE(CONFIG_MTK_CMDQ) 424 + struct cmdq_pkt *cmdq_handle; 425 + #endif 426 + struct drm_crtc *crtc = &mtk_crtc->base; 427 + struct mtk_drm_private *priv = crtc->dev->dev_private; 428 + unsigned int pending_planes = 0, pending_async_planes = 0; 429 + int i; 430 + 431 + mutex_lock(&mtk_crtc->hw_lock); 432 + for (i = 0; i < mtk_crtc->layer_nr; i++) { 433 + struct drm_plane *plane = &mtk_crtc->planes[i]; 434 + struct mtk_plane_state *plane_state; 435 + 436 + plane_state = to_mtk_plane_state(plane->state); 437 + if (plane_state->pending.dirty) { 438 + plane_state->pending.config = true; 439 + plane_state->pending.dirty = false; 440 + pending_planes |= BIT(i); 441 + } else if (plane_state->pending.async_dirty) { 442 + plane_state->pending.async_config = true; 443 + plane_state->pending.async_dirty = false; 444 + pending_async_planes |= BIT(i); 445 + } 446 + } 447 + if (pending_planes) 448 + mtk_crtc->pending_planes = true; 449 + if (pending_async_planes) 450 + mtk_crtc->pending_async_planes = true; 451 + 452 + if (priv->data->shadow_register) { 453 + mtk_disp_mutex_acquire(mtk_crtc->mutex); 454 + mtk_crtc_ddp_config(crtc, NULL); 455 + mtk_disp_mutex_release(mtk_crtc->mutex); 456 + } 457 + #if IS_REACHABLE(CONFIG_MTK_CMDQ) 458 + if (mtk_crtc->cmdq_client) { 459 + cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE); 460 + cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event); 461 + 
cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event); 462 + mtk_crtc_ddp_config(crtc, cmdq_handle); 463 + cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle); 464 + } 465 + #endif 466 + mutex_unlock(&mtk_crtc->hw_lock); 425 467 } 426 468 427 469 int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane, ··· 503 401 struct mtk_ddp_comp *comp; 504 402 505 403 comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer); 506 - return mtk_ddp_comp_layer_check(comp, local_layer, state); 404 + if (comp) 405 + return mtk_ddp_comp_layer_check(comp, local_layer, state); 406 + return 0; 407 + } 408 + 409 + void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane, 410 + struct drm_plane_state *new_state) 411 + { 412 + struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); 413 + const struct drm_plane_helper_funcs *plane_helper_funcs = 414 + plane->helper_private; 415 + 416 + if (!mtk_crtc->enabled) 417 + return; 418 + 419 + plane_helper_funcs->atomic_update(plane, new_state); 420 + mtk_drm_crtc_hw_config(mtk_crtc); 507 421 } 508 422 509 423 static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc, ··· 569 451 } 570 452 mtk_crtc->pending_planes = true; 571 453 454 + mtk_drm_crtc_hw_config(mtk_crtc); 572 455 /* Wait for planes to be disabled */ 573 456 drm_crtc_wait_one_vblank(crtc); 574 457 ··· 601 482 struct drm_crtc_state *old_crtc_state) 602 483 { 603 484 struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); 604 - struct mtk_drm_private *priv = crtc->dev->dev_private; 605 - unsigned int pending_planes = 0; 606 485 int i; 607 486 608 487 if (mtk_crtc->event) 609 488 mtk_crtc->pending_needs_vblank = true; 610 - for (i = 0; i < mtk_crtc->layer_nr; i++) { 611 - struct drm_plane *plane = &mtk_crtc->planes[i]; 612 - struct mtk_plane_state *plane_state; 613 - 614 - plane_state = to_mtk_plane_state(plane->state); 615 - if (plane_state->pending.dirty) { 616 - plane_state->pending.config = true; 617 - plane_state->pending.dirty = false; 618 - 
pending_planes |= BIT(i); 619 - } 620 - } 621 - if (pending_planes) 622 - mtk_crtc->pending_planes = true; 623 489 if (crtc->state->color_mgmt_changed) 624 - for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) 490 + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { 625 491 mtk_ddp_gamma_set(mtk_crtc->ddp_comp[i], crtc->state); 626 - 627 - if (priv->data->shadow_register) { 628 - mtk_disp_mutex_acquire(mtk_crtc->mutex); 629 - mtk_crtc_ddp_config(crtc); 630 - mtk_disp_mutex_release(mtk_crtc->mutex); 631 - } 492 + mtk_ddp_ctm_set(mtk_crtc->ddp_comp[i], crtc->state); 493 + } 494 + mtk_drm_crtc_hw_config(mtk_crtc); 632 495 } 633 496 634 497 static const struct drm_crtc_funcs mtk_crtc_funcs = { ··· 660 559 struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); 661 560 struct mtk_drm_private *priv = crtc->dev->dev_private; 662 561 562 + #if IS_REACHABLE(CONFIG_MTK_CMDQ) 563 + if (!priv->data->shadow_register && !mtk_crtc->cmdq_client) 564 + #else 663 565 if (!priv->data->shadow_register) 664 - mtk_crtc_ddp_config(crtc); 566 + #endif 567 + mtk_crtc_ddp_config(crtc, NULL); 665 568 666 569 mtk_drm_finish_page_flip(mtk_crtc); 667 570 } ··· 732 627 int pipe = priv->num_pipes; 733 628 int ret; 734 629 int i; 630 + bool has_ctm = false; 631 + uint gamma_lut_size = 0; 735 632 736 633 if (!path) 737 634 return 0; ··· 784 677 } 785 678 786 679 mtk_crtc->ddp_comp[i] = comp; 680 + 681 + if (comp->funcs) { 682 + if (comp->funcs->gamma_set) 683 + gamma_lut_size = MTK_LUT_SIZE; 684 + 685 + if (comp->funcs->ctm_set) 686 + has_ctm = true; 687 + } 787 688 } 788 689 789 690 for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) ··· 812 697 NULL, pipe); 813 698 if (ret < 0) 814 699 return ret; 815 - drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE); 816 - drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, false, MTK_LUT_SIZE); 817 - priv->num_pipes++; 818 700 701 + if (gamma_lut_size) 702 + drm_mode_crtc_set_gamma_size(&mtk_crtc->base, gamma_lut_size); 703 + drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, 
has_ctm, gamma_lut_size); 704 + priv->num_pipes++; 705 + mutex_init(&mtk_crtc->hw_lock); 706 + 707 + #if IS_REACHABLE(CONFIG_MTK_CMDQ) 708 + mtk_crtc->cmdq_client = 709 + cmdq_mbox_create(dev, drm_crtc_index(&mtk_crtc->base), 710 + 2000); 711 + if (IS_ERR(mtk_crtc->cmdq_client)) { 712 + dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n", 713 + drm_crtc_index(&mtk_crtc->base)); 714 + mtk_crtc->cmdq_client = NULL; 715 + } 716 + ret = of_property_read_u32_index(dev->of_node, "mediatek,gce-events", 717 + drm_crtc_index(&mtk_crtc->base), 718 + &mtk_crtc->cmdq_event); 719 + if (ret) 720 + dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n", 721 + drm_crtc_index(&mtk_crtc->base)); 722 + #endif 819 723 return 0; 820 724 }
+2
drivers/gpu/drm/mediatek/mtk_drm_crtc.h
··· 21 21 unsigned int path_len); 22 22 int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane, 23 23 struct mtk_plane_state *state); 24 + void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane, 25 + struct drm_plane_state *plane_state); 24 26 25 27 #endif /* MTK_DRM_CRTC_H */
+155 -29
drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
··· 12 12 #include <linux/of_irq.h> 13 13 #include <linux/of_platform.h> 14 14 #include <linux/platform_device.h> 15 - 15 + #include <linux/soc/mediatek/mtk-cmdq.h> 16 16 #include "mtk_drm_drv.h" 17 17 #include "mtk_drm_plane.h" 18 18 #include "mtk_drm_ddp_comp.h" ··· 37 37 #define CCORR_EN BIT(0) 38 38 #define DISP_CCORR_CFG 0x0020 39 39 #define CCORR_RELAY_MODE BIT(0) 40 + #define CCORR_ENGINE_EN BIT(1) 41 + #define CCORR_GAMMA_OFF BIT(2) 42 + #define CCORR_WGAMUT_SRC_CLIP BIT(3) 40 43 #define DISP_CCORR_SIZE 0x0030 44 + #define DISP_CCORR_COEF_0 0x0080 45 + #define DISP_CCORR_COEF_1 0x0084 46 + #define DISP_CCORR_COEF_2 0x0088 47 + #define DISP_CCORR_COEF_3 0x008C 48 + #define DISP_CCORR_COEF_4 0x0090 41 49 42 50 #define DISP_DITHER_EN 0x0000 43 51 #define DITHER_EN BIT(0) ··· 84 76 #define DITHER_ADD_LSHIFT_G(x) (((x) & 0x7) << 4) 85 77 #define DITHER_ADD_RSHIFT_G(x) (((x) & 0x7) << 0) 86 78 79 + void mtk_ddp_write(struct cmdq_pkt *cmdq_pkt, unsigned int value, 80 + struct mtk_ddp_comp *comp, unsigned int offset) 81 + { 82 + #if IS_REACHABLE(CONFIG_MTK_CMDQ) 83 + if (cmdq_pkt) 84 + cmdq_pkt_write(cmdq_pkt, comp->subsys, 85 + comp->regs_pa + offset, value); 86 + else 87 + #endif 88 + writel(value, comp->regs + offset); 89 + } 90 + 91 + void mtk_ddp_write_relaxed(struct cmdq_pkt *cmdq_pkt, unsigned int value, 92 + struct mtk_ddp_comp *comp, 93 + unsigned int offset) 94 + { 95 + #if IS_REACHABLE(CONFIG_MTK_CMDQ) 96 + if (cmdq_pkt) 97 + cmdq_pkt_write(cmdq_pkt, comp->subsys, 98 + comp->regs_pa + offset, value); 99 + else 100 + #endif 101 + writel_relaxed(value, comp->regs + offset); 102 + } 103 + 104 + void mtk_ddp_write_mask(struct cmdq_pkt *cmdq_pkt, 105 + unsigned int value, 106 + struct mtk_ddp_comp *comp, 107 + unsigned int offset, 108 + unsigned int mask) 109 + { 110 + #if IS_REACHABLE(CONFIG_MTK_CMDQ) 111 + if (cmdq_pkt) { 112 + cmdq_pkt_write_mask(cmdq_pkt, comp->subsys, 113 + comp->regs_pa + offset, value, mask); 114 + } else { 115 + #endif 116 + u32 tmp = 
readl(comp->regs + offset); 117 + 118 + tmp = (tmp & ~mask) | (value & mask); 119 + writel(tmp, comp->regs + offset); 120 + #if IS_REACHABLE(CONFIG_MTK_CMDQ) 121 + } 122 + #endif 123 + } 124 + 87 125 void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc, 88 - unsigned int CFG) 126 + unsigned int CFG, struct cmdq_pkt *cmdq_pkt) 89 127 { 90 128 /* If bpc equal to 0, the dithering function didn't be enabled */ 91 129 if (bpc == 0) 92 130 return; 93 131 94 132 if (bpc >= MTK_MIN_BPC) { 95 - writel(0, comp->regs + DISP_DITHER_5); 96 - writel(0, comp->regs + DISP_DITHER_7); 97 - writel(DITHER_LSB_ERR_SHIFT_R(MTK_MAX_BPC - bpc) | 98 - DITHER_ADD_LSHIFT_R(MTK_MAX_BPC - bpc) | 99 - DITHER_NEW_BIT_MODE, 100 - comp->regs + DISP_DITHER_15); 101 - writel(DITHER_LSB_ERR_SHIFT_B(MTK_MAX_BPC - bpc) | 102 - DITHER_ADD_LSHIFT_B(MTK_MAX_BPC - bpc) | 103 - DITHER_LSB_ERR_SHIFT_G(MTK_MAX_BPC - bpc) | 104 - DITHER_ADD_LSHIFT_G(MTK_MAX_BPC - bpc), 105 - comp->regs + DISP_DITHER_16); 106 - writel(DISP_DITHERING, comp->regs + CFG); 133 + mtk_ddp_write(cmdq_pkt, 0, comp, DISP_DITHER_5); 134 + mtk_ddp_write(cmdq_pkt, 0, comp, DISP_DITHER_7); 135 + mtk_ddp_write(cmdq_pkt, 136 + DITHER_LSB_ERR_SHIFT_R(MTK_MAX_BPC - bpc) | 137 + DITHER_ADD_LSHIFT_R(MTK_MAX_BPC - bpc) | 138 + DITHER_NEW_BIT_MODE, 139 + comp, DISP_DITHER_15); 140 + mtk_ddp_write(cmdq_pkt, 141 + DITHER_LSB_ERR_SHIFT_B(MTK_MAX_BPC - bpc) | 142 + DITHER_ADD_LSHIFT_B(MTK_MAX_BPC - bpc) | 143 + DITHER_LSB_ERR_SHIFT_G(MTK_MAX_BPC - bpc) | 144 + DITHER_ADD_LSHIFT_G(MTK_MAX_BPC - bpc), 145 + comp, DISP_DITHER_16); 146 + mtk_ddp_write(cmdq_pkt, DISP_DITHERING, comp, CFG); 107 147 } 108 148 } 109 149 110 150 static void mtk_od_config(struct mtk_ddp_comp *comp, unsigned int w, 111 151 unsigned int h, unsigned int vrefresh, 112 - unsigned int bpc) 152 + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) 113 153 { 114 - writel(w << 16 | h, comp->regs + DISP_OD_SIZE); 115 - writel(OD_RELAYMODE, comp->regs + DISP_OD_CFG); 116 - 
mtk_dither_set(comp, bpc, DISP_OD_CFG); 154 + mtk_ddp_write(cmdq_pkt, w << 16 | h, comp, DISP_OD_SIZE); 155 + mtk_ddp_write(cmdq_pkt, OD_RELAYMODE, comp, DISP_OD_CFG); 156 + mtk_dither_set(comp, bpc, DISP_OD_CFG, cmdq_pkt); 117 157 } 118 158 119 159 static void mtk_od_start(struct mtk_ddp_comp *comp) ··· 176 120 177 121 static void mtk_aal_config(struct mtk_ddp_comp *comp, unsigned int w, 178 122 unsigned int h, unsigned int vrefresh, 179 - unsigned int bpc) 123 + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) 180 124 { 181 - writel(h << 16 | w, comp->regs + DISP_AAL_SIZE); 125 + mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_AAL_SIZE); 182 126 } 183 127 184 128 static void mtk_aal_start(struct mtk_ddp_comp *comp) ··· 193 137 194 138 static void mtk_ccorr_config(struct mtk_ddp_comp *comp, unsigned int w, 195 139 unsigned int h, unsigned int vrefresh, 196 - unsigned int bpc) 140 + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) 197 141 { 198 - writel(h << 16 | w, comp->regs + DISP_CCORR_SIZE); 199 - writel(CCORR_RELAY_MODE, comp->regs + DISP_CCORR_CFG); 142 + mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_CCORR_SIZE); 143 + mtk_ddp_write(cmdq_pkt, CCORR_ENGINE_EN, comp, DISP_CCORR_CFG); 200 144 } 201 145 202 146 static void mtk_ccorr_start(struct mtk_ddp_comp *comp) ··· 209 153 writel_relaxed(0x0, comp->regs + DISP_CCORR_EN); 210 154 } 211 155 156 + /* Converts a DRM S31.32 value to the HW S1.10 format. */ 157 + static u16 mtk_ctm_s31_32_to_s1_10(u64 in) 158 + { 159 + u16 r; 160 + 161 + /* Sign bit. */ 162 + r = in & BIT_ULL(63) ? BIT(11) : 0; 163 + 164 + if ((in & GENMASK_ULL(62, 33)) > 0) { 165 + /* identity value 0x100000000 -> 0x400, */ 166 + /* if bigger this, set it to max 0x7ff. */ 167 + r |= GENMASK(10, 0); 168 + } else { 169 + /* take the 11 most important bits. 
*/ 170 + r |= (in >> 22) & GENMASK(10, 0); 171 + } 172 + 173 + return r; 174 + } 175 + 176 + static void mtk_ccorr_ctm_set(struct mtk_ddp_comp *comp, 177 + struct drm_crtc_state *state) 178 + { 179 + struct drm_property_blob *blob = state->ctm; 180 + struct drm_color_ctm *ctm; 181 + const u64 *input; 182 + uint16_t coeffs[9] = { 0 }; 183 + int i; 184 + struct cmdq_pkt *cmdq_pkt = NULL; 185 + 186 + if (!blob) 187 + return; 188 + 189 + ctm = (struct drm_color_ctm *)blob->data; 190 + input = ctm->matrix; 191 + 192 + for (i = 0; i < ARRAY_SIZE(coeffs); i++) 193 + coeffs[i] = mtk_ctm_s31_32_to_s1_10(input[i]); 194 + 195 + mtk_ddp_write(cmdq_pkt, coeffs[0] << 16 | coeffs[1], 196 + comp, DISP_CCORR_COEF_0); 197 + mtk_ddp_write(cmdq_pkt, coeffs[2] << 16 | coeffs[3], 198 + comp, DISP_CCORR_COEF_1); 199 + mtk_ddp_write(cmdq_pkt, coeffs[4] << 16 | coeffs[5], 200 + comp, DISP_CCORR_COEF_2); 201 + mtk_ddp_write(cmdq_pkt, coeffs[6] << 16 | coeffs[7], 202 + comp, DISP_CCORR_COEF_3); 203 + mtk_ddp_write(cmdq_pkt, coeffs[8] << 16, 204 + comp, DISP_CCORR_COEF_4); 205 + } 206 + 212 207 static void mtk_dither_config(struct mtk_ddp_comp *comp, unsigned int w, 213 208 unsigned int h, unsigned int vrefresh, 214 - unsigned int bpc) 209 + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) 215 210 { 216 - writel(h << 16 | w, comp->regs + DISP_DITHER_SIZE); 217 - writel(DITHER_RELAY_MODE, comp->regs + DISP_DITHER_CFG); 211 + mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_DITHER_SIZE); 212 + mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, comp, DISP_DITHER_CFG); 218 213 } 219 214 220 215 static void mtk_dither_start(struct mtk_ddp_comp *comp) ··· 280 173 281 174 static void mtk_gamma_config(struct mtk_ddp_comp *comp, unsigned int w, 282 175 unsigned int h, unsigned int vrefresh, 283 - unsigned int bpc) 176 + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) 284 177 { 285 - writel(h << 16 | w, comp->regs + DISP_GAMMA_SIZE); 286 - mtk_dither_set(comp, bpc, DISP_GAMMA_CFG); 178 + mtk_ddp_write(cmdq_pkt, h << 
16 | w, comp, DISP_GAMMA_SIZE); 179 + mtk_dither_set(comp, bpc, DISP_GAMMA_CFG, cmdq_pkt); 287 180 } 288 181 289 182 static void mtk_gamma_start(struct mtk_ddp_comp *comp) ··· 330 223 .config = mtk_ccorr_config, 331 224 .start = mtk_ccorr_start, 332 225 .stop = mtk_ccorr_stop, 226 + .ctm_set = mtk_ccorr_ctm_set, 333 227 }; 334 228 335 229 static const struct mtk_ddp_comp_funcs ddp_dither = { ··· 434 326 enum mtk_ddp_comp_type type; 435 327 struct device_node *larb_node; 436 328 struct platform_device *larb_pdev; 329 + #if IS_REACHABLE(CONFIG_MTK_CMDQ) 330 + struct resource res; 331 + struct cmdq_client_reg cmdq_reg; 332 + int ret; 333 + #endif 437 334 438 335 if (comp_id < 0 || comp_id >= DDP_COMPONENT_ID_MAX) 439 336 return -EINVAL; ··· 492 379 493 380 comp->larb_dev = &larb_pdev->dev; 494 381 382 + #if IS_REACHABLE(CONFIG_MTK_CMDQ) 383 + if (of_address_to_resource(node, 0, &res) != 0) { 384 + dev_err(dev, "Missing reg in %s node\n", node->full_name); 385 + return -EINVAL; 386 + } 387 + comp->regs_pa = res.start; 388 + 389 + ret = cmdq_dev_get_client_reg(dev, &cmdq_reg, 0); 390 + if (ret) 391 + dev_dbg(dev, "get mediatek,gce-client-reg fail!\n"); 392 + else 393 + comp->subsys = cmdq_reg.subsys; 394 + #endif 495 395 return 0; 496 396 } 497 397
+31 -25
drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
··· 69 69 }; 70 70 71 71 struct mtk_ddp_comp; 72 - 72 + struct cmdq_pkt; 73 73 struct mtk_ddp_comp_funcs { 74 74 void (*config)(struct mtk_ddp_comp *comp, unsigned int w, 75 - unsigned int h, unsigned int vrefresh, unsigned int bpc); 75 + unsigned int h, unsigned int vrefresh, 76 + unsigned int bpc, struct cmdq_pkt *cmdq_pkt); 76 77 void (*start)(struct mtk_ddp_comp *comp); 77 78 void (*stop)(struct mtk_ddp_comp *comp); 78 79 void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc); 79 80 void (*disable_vblank)(struct mtk_ddp_comp *comp); 80 81 unsigned int (*supported_rotations)(struct mtk_ddp_comp *comp); 81 82 unsigned int (*layer_nr)(struct mtk_ddp_comp *comp); 82 - void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx); 83 - void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx); 84 83 int (*layer_check)(struct mtk_ddp_comp *comp, 85 84 unsigned int idx, 86 85 struct mtk_plane_state *state); 87 86 void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx, 88 - struct mtk_plane_state *state); 87 + struct mtk_plane_state *state, 88 + struct cmdq_pkt *cmdq_pkt); 89 89 void (*gamma_set)(struct mtk_ddp_comp *comp, 90 90 struct drm_crtc_state *state); 91 91 void (*bgclr_in_on)(struct mtk_ddp_comp *comp); 92 92 void (*bgclr_in_off)(struct mtk_ddp_comp *comp); 93 + void (*ctm_set)(struct mtk_ddp_comp *comp, 94 + struct drm_crtc_state *state); 93 95 }; 94 96 95 97 struct mtk_ddp_comp { ··· 101 99 struct device *larb_dev; 102 100 enum mtk_ddp_comp_id id; 103 101 const struct mtk_ddp_comp_funcs *funcs; 102 + resource_size_t regs_pa; 103 + u8 subsys; 104 104 }; 105 105 106 106 static inline void mtk_ddp_comp_config(struct mtk_ddp_comp *comp, 107 107 unsigned int w, unsigned int h, 108 - unsigned int vrefresh, unsigned int bpc) 108 + unsigned int vrefresh, unsigned int bpc, 109 + struct cmdq_pkt *cmdq_pkt) 109 110 { 110 111 if (comp->funcs && comp->funcs->config) 111 - comp->funcs->config(comp, w, h, vrefresh, bpc); 112 + 
comp->funcs->config(comp, w, h, vrefresh, bpc, cmdq_pkt); 112 113 } 113 114 114 115 static inline void mtk_ddp_comp_start(struct mtk_ddp_comp *comp) ··· 156 151 return 0; 157 152 } 158 153 159 - static inline void mtk_ddp_comp_layer_on(struct mtk_ddp_comp *comp, 160 - unsigned int idx) 161 - { 162 - if (comp->funcs && comp->funcs->layer_on) 163 - comp->funcs->layer_on(comp, idx); 164 - } 165 - 166 - static inline void mtk_ddp_comp_layer_off(struct mtk_ddp_comp *comp, 167 - unsigned int idx) 168 - { 169 - if (comp->funcs && comp->funcs->layer_off) 170 - comp->funcs->layer_off(comp, idx); 171 - } 172 - 173 154 static inline int mtk_ddp_comp_layer_check(struct mtk_ddp_comp *comp, 174 155 unsigned int idx, 175 156 struct mtk_plane_state *state) ··· 167 176 168 177 static inline void mtk_ddp_comp_layer_config(struct mtk_ddp_comp *comp, 169 178 unsigned int idx, 170 - struct mtk_plane_state *state) 179 + struct mtk_plane_state *state, 180 + struct cmdq_pkt *cmdq_pkt) 171 181 { 172 182 if (comp->funcs && comp->funcs->layer_config) 173 - comp->funcs->layer_config(comp, idx, state); 183 + comp->funcs->layer_config(comp, idx, state, cmdq_pkt); 174 184 } 175 185 176 186 static inline void mtk_ddp_gamma_set(struct mtk_ddp_comp *comp, ··· 193 201 comp->funcs->bgclr_in_off(comp); 194 202 } 195 203 204 + static inline void mtk_ddp_ctm_set(struct mtk_ddp_comp *comp, 205 + struct drm_crtc_state *state) 206 + { 207 + if (comp->funcs && comp->funcs->ctm_set) 208 + comp->funcs->ctm_set(comp, state); 209 + } 210 + 196 211 int mtk_ddp_comp_get_id(struct device_node *node, 197 212 enum mtk_ddp_comp_type comp_type); 198 213 int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node, ··· 208 209 int mtk_ddp_comp_register(struct drm_device *drm, struct mtk_ddp_comp *comp); 209 210 void mtk_ddp_comp_unregister(struct drm_device *drm, struct mtk_ddp_comp *comp); 210 211 void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc, 211 - unsigned int CFG); 212 - 212 + unsigned 
int CFG, struct cmdq_pkt *cmdq_pkt); 213 + enum mtk_ddp_comp_type mtk_ddp_comp_get_type(enum mtk_ddp_comp_id comp_id); 214 + void mtk_ddp_write(struct cmdq_pkt *cmdq_pkt, unsigned int value, 215 + struct mtk_ddp_comp *comp, unsigned int offset); 216 + void mtk_ddp_write_relaxed(struct cmdq_pkt *cmdq_pkt, unsigned int value, 217 + struct mtk_ddp_comp *comp, unsigned int offset); 218 + void mtk_ddp_write_mask(struct cmdq_pkt *cmdq_pkt, unsigned int value, 219 + struct mtk_ddp_comp *comp, unsigned int offset, 220 + unsigned int mask); 213 221 #endif /* MTK_DRM_DDP_COMP_H */
+5 -81
drivers/gpu/drm/mediatek/mtk_drm_drv.c
··· 37 37 #define DRIVER_MAJOR 1 38 38 #define DRIVER_MINOR 0 39 39 40 - static void mtk_atomic_schedule(struct mtk_drm_private *private, 41 - struct drm_atomic_state *state) 42 - { 43 - private->commit.state = state; 44 - schedule_work(&private->commit.work); 45 - } 46 - 47 - static void mtk_atomic_complete(struct mtk_drm_private *private, 48 - struct drm_atomic_state *state) 49 - { 50 - struct drm_device *drm = private->drm; 51 - 52 - drm_atomic_helper_wait_for_fences(drm, state, false); 53 - 54 - /* 55 - * Mediatek drm supports runtime PM, so plane registers cannot be 56 - * written when their crtc is disabled. 57 - * 58 - * The comment for drm_atomic_helper_commit states: 59 - * For drivers supporting runtime PM the recommended sequence is 60 - * 61 - * drm_atomic_helper_commit_modeset_disables(dev, state); 62 - * drm_atomic_helper_commit_modeset_enables(dev, state); 63 - * drm_atomic_helper_commit_planes(dev, state, 64 - * DRM_PLANE_COMMIT_ACTIVE_ONLY); 65 - * 66 - * See the kerneldoc entries for these three functions for more details. 
67 - */ 68 - drm_atomic_helper_commit_modeset_disables(drm, state); 69 - drm_atomic_helper_commit_modeset_enables(drm, state); 70 - drm_atomic_helper_commit_planes(drm, state, 71 - DRM_PLANE_COMMIT_ACTIVE_ONLY); 72 - 73 - drm_atomic_helper_wait_for_vblanks(drm, state); 74 - 75 - drm_atomic_helper_cleanup_planes(drm, state); 76 - drm_atomic_state_put(state); 77 - } 78 - 79 - static void mtk_atomic_work(struct work_struct *work) 80 - { 81 - struct mtk_drm_private *private = container_of(work, 82 - struct mtk_drm_private, commit.work); 83 - 84 - mtk_atomic_complete(private, private->commit.state); 85 - } 86 - 87 - static int mtk_atomic_commit(struct drm_device *drm, 88 - struct drm_atomic_state *state, 89 - bool async) 90 - { 91 - struct mtk_drm_private *private = drm->dev_private; 92 - int ret; 93 - 94 - ret = drm_atomic_helper_prepare_planes(drm, state); 95 - if (ret) 96 - return ret; 97 - 98 - mutex_lock(&private->commit.lock); 99 - flush_work(&private->commit.work); 100 - 101 - ret = drm_atomic_helper_swap_state(state, true); 102 - if (ret) { 103 - mutex_unlock(&private->commit.lock); 104 - drm_atomic_helper_cleanup_planes(drm, state); 105 - return ret; 106 - } 107 - 108 - drm_atomic_state_get(state); 109 - if (async) 110 - mtk_atomic_schedule(private, state); 111 - else 112 - mtk_atomic_complete(private, state); 113 - 114 - mutex_unlock(&private->commit.lock); 115 - 116 - return 0; 117 - } 40 + static const struct drm_mode_config_helper_funcs mtk_drm_mode_config_helpers = { 41 + .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm, 42 + }; 118 43 119 44 static struct drm_framebuffer * 120 45 mtk_drm_mode_fb_create(struct drm_device *dev, ··· 57 132 static const struct drm_mode_config_funcs mtk_drm_mode_config_funcs = { 58 133 .fb_create = mtk_drm_mode_fb_create, 59 134 .atomic_check = drm_atomic_helper_check, 60 - .atomic_commit = mtk_atomic_commit, 135 + .atomic_commit = drm_atomic_helper_commit, 61 136 }; 62 137 63 138 static const enum mtk_ddp_comp_id 
mt2701_mtk_ddp_main[] = { ··· 175 250 drm->mode_config.max_width = 4096; 176 251 drm->mode_config.max_height = 4096; 177 252 drm->mode_config.funcs = &mtk_drm_mode_config_funcs; 253 + drm->mode_config.helper_private = &mtk_drm_mode_config_helpers; 178 254 179 255 ret = component_bind_all(drm->dev, drm); 180 256 if (ret) ··· 435 509 if (!private) 436 510 return -ENOMEM; 437 511 438 - mutex_init(&private->commit.lock); 439 - INIT_WORK(&private->commit.work, mtk_atomic_work); 440 512 private->data = of_device_get_match_data(dev); 441 513 442 514 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-7
drivers/gpu/drm/mediatek/mtk_drm_drv.h
··· 43 43 struct device_node *comp_node[DDP_COMPONENT_ID_MAX]; 44 44 struct mtk_ddp_comp *ddp_comp[DDP_COMPONENT_ID_MAX]; 45 45 const struct mtk_mmsys_driver_data *data; 46 - 47 - struct { 48 - struct drm_atomic_state *state; 49 - struct work_struct work; 50 - struct mutex lock; 51 - } commit; 52 - 53 46 struct drm_atomic_state *suspend_state; 54 47 55 48 bool dma_parms_allocated;
+47
drivers/gpu/drm/mediatek/mtk_drm_plane.c
··· 7 7 #include <drm/drm_atomic.h> 8 8 #include <drm/drm_atomic_helper.h> 9 9 #include <drm/drm_fourcc.h> 10 + #include <drm/drm_atomic_uapi.h> 10 11 #include <drm/drm_plane_helper.h> 11 12 #include <drm/drm_gem_framebuffer_helper.h> 12 13 ··· 74 73 { 75 74 __drm_atomic_helper_plane_destroy_state(state); 76 75 kfree(to_mtk_plane_state(state)); 76 + } 77 + 78 + static int mtk_plane_atomic_async_check(struct drm_plane *plane, 79 + struct drm_plane_state *state) 80 + { 81 + struct drm_crtc_state *crtc_state; 82 + 83 + if (plane != state->crtc->cursor) 84 + return -EINVAL; 85 + 86 + if (!plane->state) 87 + return -EINVAL; 88 + 89 + if (!plane->state->fb) 90 + return -EINVAL; 91 + 92 + if (state->state) 93 + crtc_state = drm_atomic_get_existing_crtc_state(state->state, 94 + state->crtc); 95 + else /* Special case for asynchronous cursor updates. */ 96 + crtc_state = state->crtc->state; 97 + 98 + return drm_atomic_helper_check_plane_state(plane->state, crtc_state, 99 + DRM_PLANE_HELPER_NO_SCALING, 100 + DRM_PLANE_HELPER_NO_SCALING, 101 + true, true); 102 + } 103 + 104 + static void mtk_plane_atomic_async_update(struct drm_plane *plane, 105 + struct drm_plane_state *new_state) 106 + { 107 + struct mtk_plane_state *state = to_mtk_plane_state(plane->state); 108 + 109 + plane->state->crtc_x = new_state->crtc_x; 110 + plane->state->crtc_y = new_state->crtc_y; 111 + plane->state->crtc_h = new_state->crtc_h; 112 + plane->state->crtc_w = new_state->crtc_w; 113 + plane->state->src_x = new_state->src_x; 114 + plane->state->src_y = new_state->src_y; 115 + plane->state->src_h = new_state->src_h; 116 + plane->state->src_w = new_state->src_w; 117 + state->pending.async_dirty = true; 118 + 119 + mtk_drm_crtc_async_update(new_state->crtc, plane, new_state); 77 120 } 78 121 79 122 static const struct drm_plane_funcs mtk_plane_funcs = { ··· 208 163 .atomic_check = mtk_plane_atomic_check, 209 164 .atomic_update = mtk_plane_atomic_update, 210 165 .atomic_disable = mtk_plane_atomic_disable, 
166 + .atomic_async_update = mtk_plane_atomic_async_update, 167 + .atomic_async_check = mtk_plane_atomic_async_check, 211 168 }; 212 169 213 170 int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
+2
drivers/gpu/drm/mediatek/mtk_drm_plane.h
··· 22 22 unsigned int height; 23 23 unsigned int rotation; 24 24 bool dirty; 25 + bool async_dirty; 26 + bool async_config; 25 27 }; 26 28 27 29 struct mtk_plane_state {
+121 -26
drivers/soc/mediatek/mtk-cmdq-helper.c
··· 9 9 #include <linux/mailbox_controller.h> 10 10 #include <linux/soc/mediatek/mtk-cmdq.h> 11 11 12 - #define CMDQ_ARG_A_WRITE_MASK 0xffff 13 12 #define CMDQ_WRITE_ENABLE_MASK BIT(0) 13 + #define CMDQ_POLL_ENABLE_MASK BIT(0) 14 14 #define CMDQ_EOC_IRQ_EN BIT(0) 15 15 #define CMDQ_EOC_CMD ((u64)((CMDQ_CODE_EOC << CMDQ_OP_CODE_SHIFT)) \ 16 16 << 32 | CMDQ_EOC_IRQ_EN) 17 + 18 + struct cmdq_instruction { 19 + union { 20 + u32 value; 21 + u32 mask; 22 + }; 23 + union { 24 + u16 offset; 25 + u16 event; 26 + }; 27 + u8 subsys; 28 + u8 op; 29 + }; 30 + 31 + int cmdq_dev_get_client_reg(struct device *dev, 32 + struct cmdq_client_reg *client_reg, int idx) 33 + { 34 + struct of_phandle_args spec; 35 + int err; 36 + 37 + if (!client_reg) 38 + return -ENOENT; 39 + 40 + err = of_parse_phandle_with_fixed_args(dev->of_node, 41 + "mediatek,gce-client-reg", 42 + 3, idx, &spec); 43 + if (err < 0) { 44 + dev_err(dev, 45 + "error %d can't parse gce-client-reg property (%d)", 46 + err, idx); 47 + 48 + return err; 49 + } 50 + 51 + client_reg->subsys = (u8)spec.args[0]; 52 + client_reg->offset = (u16)spec.args[1]; 53 + client_reg->size = (u16)spec.args[2]; 54 + of_node_put(spec.np); 55 + 56 + return 0; 57 + } 58 + EXPORT_SYMBOL(cmdq_dev_get_client_reg); 17 59 18 60 static void cmdq_client_timeout(struct timer_list *t) 19 61 { ··· 152 110 } 153 111 EXPORT_SYMBOL(cmdq_pkt_destroy); 154 112 155 - static int cmdq_pkt_append_command(struct cmdq_pkt *pkt, enum cmdq_code code, 156 - u32 arg_a, u32 arg_b) 113 + static int cmdq_pkt_append_command(struct cmdq_pkt *pkt, 114 + struct cmdq_instruction inst) 157 115 { 158 - u64 *cmd_ptr; 116 + struct cmdq_instruction *cmd_ptr; 159 117 160 118 if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) { 161 119 /* ··· 171 129 __func__, (u32)pkt->buf_size); 172 130 return -ENOMEM; 173 131 } 132 + 174 133 cmd_ptr = pkt->va_base + pkt->cmd_buf_size; 175 - (*cmd_ptr) = (u64)((code << CMDQ_OP_CODE_SHIFT) | arg_a) << 32 | arg_b; 134 + *cmd_ptr = 
inst; 176 135 pkt->cmd_buf_size += CMDQ_INST_SIZE; 177 136 178 137 return 0; ··· 181 138 182 139 int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value) 183 140 { 184 - u32 arg_a = (offset & CMDQ_ARG_A_WRITE_MASK) | 185 - (subsys << CMDQ_SUBSYS_SHIFT); 141 + struct cmdq_instruction inst; 186 142 187 - return cmdq_pkt_append_command(pkt, CMDQ_CODE_WRITE, arg_a, value); 143 + inst.op = CMDQ_CODE_WRITE; 144 + inst.value = value; 145 + inst.offset = offset; 146 + inst.subsys = subsys; 147 + 148 + return cmdq_pkt_append_command(pkt, inst); 188 149 } 189 150 EXPORT_SYMBOL(cmdq_pkt_write); 190 151 191 152 int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys, 192 153 u16 offset, u32 value, u32 mask) 193 154 { 194 - u32 offset_mask = offset; 195 - int err = 0; 155 + struct cmdq_instruction inst = { {0} }; 156 + u16 offset_mask = offset; 157 + int err; 196 158 197 159 if (mask != 0xffffffff) { 198 - err = cmdq_pkt_append_command(pkt, CMDQ_CODE_MASK, 0, ~mask); 160 + inst.op = CMDQ_CODE_MASK; 161 + inst.mask = ~mask; 162 + err = cmdq_pkt_append_command(pkt, inst); 163 + if (err < 0) 164 + return err; 165 + 199 166 offset_mask |= CMDQ_WRITE_ENABLE_MASK; 200 167 } 201 - err |= cmdq_pkt_write(pkt, subsys, offset_mask, value); 168 + err = cmdq_pkt_write(pkt, subsys, offset_mask, value); 202 169 203 170 return err; 204 171 } ··· 216 163 217 164 int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event) 218 165 { 219 - u32 arg_b; 166 + struct cmdq_instruction inst = { {0} }; 220 167 221 168 if (event >= CMDQ_MAX_EVENT) 222 169 return -EINVAL; 223 170 224 - /* 225 - * WFE arg_b 226 - * bit 0-11: wait value 227 - * bit 15: 1 - wait, 0 - no wait 228 - * bit 16-27: update value 229 - * bit 31: 1 - update, 0 - no update 230 - */ 231 - arg_b = CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE; 171 + inst.op = CMDQ_CODE_WFE; 172 + inst.value = CMDQ_WFE_OPTION; 173 + inst.event = event; 232 174 233 - return cmdq_pkt_append_command(pkt, CMDQ_CODE_WFE, event, arg_b); 175 + 
return cmdq_pkt_append_command(pkt, inst); 234 176 } 235 177 EXPORT_SYMBOL(cmdq_pkt_wfe); 236 178 237 179 int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event) 238 180 { 181 + struct cmdq_instruction inst = { {0} }; 182 + 239 183 if (event >= CMDQ_MAX_EVENT) 240 184 return -EINVAL; 241 185 242 - return cmdq_pkt_append_command(pkt, CMDQ_CODE_WFE, event, 243 - CMDQ_WFE_UPDATE); 186 + inst.op = CMDQ_CODE_WFE; 187 + inst.value = CMDQ_WFE_UPDATE; 188 + inst.event = event; 189 + 190 + return cmdq_pkt_append_command(pkt, inst); 244 191 } 245 192 EXPORT_SYMBOL(cmdq_pkt_clear_event); 246 193 194 + int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys, 195 + u16 offset, u32 value) 196 + { 197 + struct cmdq_instruction inst = { {0} }; 198 + int err; 199 + 200 + inst.op = CMDQ_CODE_POLL; 201 + inst.value = value; 202 + inst.offset = offset; 203 + inst.subsys = subsys; 204 + err = cmdq_pkt_append_command(pkt, inst); 205 + 206 + return err; 207 + } 208 + EXPORT_SYMBOL(cmdq_pkt_poll); 209 + 210 + int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys, 211 + u16 offset, u32 value, u32 mask) 212 + { 213 + struct cmdq_instruction inst = { {0} }; 214 + int err; 215 + 216 + inst.op = CMDQ_CODE_MASK; 217 + inst.mask = ~mask; 218 + err = cmdq_pkt_append_command(pkt, inst); 219 + if (err < 0) 220 + return err; 221 + 222 + offset = offset | CMDQ_POLL_ENABLE_MASK; 223 + err = cmdq_pkt_poll(pkt, subsys, offset, value); 224 + 225 + return err; 226 + } 227 + EXPORT_SYMBOL(cmdq_pkt_poll_mask); 228 + 247 229 static int cmdq_pkt_finalize(struct cmdq_pkt *pkt) 248 230 { 231 + struct cmdq_instruction inst = { {0} }; 249 232 int err; 250 233 251 234 /* insert EOC and generate IRQ for each command iteration */ 252 - err = cmdq_pkt_append_command(pkt, CMDQ_CODE_EOC, 0, CMDQ_EOC_IRQ_EN); 235 + inst.op = CMDQ_CODE_EOC; 236 + inst.value = CMDQ_EOC_IRQ_EN; 237 + err = cmdq_pkt_append_command(pkt, inst); 238 + if (err < 0) 239 + return err; 253 240 254 241 /* JUMP to end */ 255 - err |= 
cmdq_pkt_append_command(pkt, CMDQ_CODE_JUMP, 0, CMDQ_JUMP_PASS); 242 + inst.op = CMDQ_CODE_JUMP; 243 + inst.value = CMDQ_JUMP_PASS; 244 + err = cmdq_pkt_append_command(pkt, inst); 256 245 257 246 return err; 258 247 }
+11
include/linux/mailbox/mtk-cmdq-mailbox.h
··· 20 20 #define CMDQ_WFE_WAIT BIT(15) 21 21 #define CMDQ_WFE_WAIT_VALUE 0x1 22 22 23 + /* 24 + * WFE arg_b 25 + * bit 0-11: wait value 26 + * bit 15: 1 - wait, 0 - no wait 27 + * bit 16-27: update value 28 + * bit 31: 1 - update, 0 - no update 29 + */ 30 + #define CMDQ_WFE_OPTION (CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | \ 31 + CMDQ_WFE_WAIT_VALUE) 32 + 23 33 /** cmdq event maximum */ 24 34 #define CMDQ_MAX_EVENT 0x3ff 25 35 ··· 55 45 enum cmdq_code { 56 46 CMDQ_CODE_MASK = 0x02, 57 47 CMDQ_CODE_WRITE = 0x04, 48 + CMDQ_CODE_POLL = 0x08, 58 49 CMDQ_CODE_JUMP = 0x10, 59 50 CMDQ_CODE_WFE = 0x20, 60 51 CMDQ_CODE_EOC = 0x40,
+53
include/linux/soc/mediatek/mtk-cmdq.h
··· 15 15 16 16 struct cmdq_pkt; 17 17 18 + struct cmdq_client_reg { 19 + u8 subsys; 20 + u16 offset; 21 + u16 size; 22 + }; 23 + 18 24 struct cmdq_client { 19 25 spinlock_t lock; 20 26 u32 pkt_cnt; ··· 29 23 struct timer_list timer; 30 24 u32 timeout_ms; /* in unit of microsecond */ 31 25 }; 26 + 27 + /** 28 + * cmdq_dev_get_client_reg() - parse cmdq client reg from the device 29 + * node of CMDQ client 30 + * @dev: device of CMDQ mailbox client 31 + * @client_reg: CMDQ client reg pointer 32 + * @idx: the index of desired reg 33 + * 34 + * Return: 0 for success; else the error code is returned 35 + * 36 + * Help CMDQ client parsing the cmdq client reg 37 + * from the device node of CMDQ client. 38 + */ 39 + int cmdq_dev_get_client_reg(struct device *dev, 40 + struct cmdq_client_reg *client_reg, int idx); 32 41 33 42 /** 34 43 * cmdq_mbox_create() - create CMDQ mailbox client and channel ··· 120 99 */ 121 100 int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event); 122 101 102 + /** 103 + * cmdq_pkt_poll() - Append polling command to the CMDQ packet, ask GCE to 104 + * execute an instruction that wait for a specified 105 + * hardware register to check for the value w/o mask. 106 + * All GCE hardware threads will be blocked by this 107 + * instruction. 108 + * @pkt: the CMDQ packet 109 + * @subsys: the CMDQ sub system code 110 + * @offset: register offset from CMDQ sub system 111 + * @value: the specified target register value 112 + * 113 + * Return: 0 for success; else the error code is returned 114 + */ 115 + int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys, 116 + u16 offset, u32 value); 117 + 118 + /** 119 + * cmdq_pkt_poll_mask() - Append polling command to the CMDQ packet, ask GCE to 120 + * execute an instruction that wait for a specified 121 + * hardware register to check for the value w/ mask. 122 + * All GCE hardware threads will be blocked by this 123 + * instruction. 
124 + * @pkt: the CMDQ packet 125 + * @subsys: the CMDQ sub system code 126 + * @offset: register offset from CMDQ sub system 127 + * @value: the specified target register value 128 + * @mask: the specified target register mask 129 + * 130 + * Return: 0 for success; else the error code is returned 131 + */ 132 + int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys, 133 + u16 offset, u32 value, u32 mask); 123 134 /** 124 135 * cmdq_pkt_flush_async() - trigger CMDQ to asynchronously execute the CMDQ 125 136 * packet and call back at the end of done packet