Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/sti: atomic crtc/plane update

Better fit STI hardware structure.
Planes are no longer responsible for updating mixer information such
as z-order and status. It is now up to the CRTC atomic flush to
do it. Plane actions (enable or disable) are performed atomically.
Disabling of a plane is synchronized with the vsync event.

Signed-off-by: Vincent Abriou <vincent.abriou@st.com>
Reviewed-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>

authored by

Vincent Abriou and committed by
Benjamin Gaignard
29d1dc62 9e1f05b2

+811 -879
+12 -20
drivers/gpu/drm/sti/sti_compositor.c
··· 61 61 { 62 62 struct sti_compositor *compo = dev_get_drvdata(dev); 63 63 struct drm_device *drm_dev = data; 64 - unsigned int i, mixer_id = 0, vid_id = 0, crtc_id = 0, plane_id = 0; 64 + unsigned int i, mixer_id = 0, vid_id = 0, crtc_id = 0; 65 65 struct sti_private *dev_priv = drm_dev->dev_private; 66 66 struct drm_plane *cursor = NULL; 67 67 struct drm_plane *primary = NULL; 68 68 struct sti_compositor_subdev_descriptor *desc = compo->data.subdev_desc; 69 69 unsigned int array_size = compo->data.nb_subdev; 70 - 71 - struct sti_plane *plane; 72 70 73 71 dev_priv->compo = compo; 74 72 ··· 108 110 /* Nothing to do, already done at the first round */ 109 111 break; 110 112 case STI_CURSOR_SUBDEV: 111 - plane = sti_cursor_create(compo->dev, desc[i].id, 112 - compo->regs + desc[i].offset); 113 - if (!plane) { 113 + cursor = sti_cursor_create(drm_dev, compo->dev, 114 + desc[i].id, 115 + compo->regs + desc[i].offset, 116 + 1); 117 + if (!cursor) { 114 118 DRM_ERROR("Can't create CURSOR plane\n"); 115 119 break; 116 120 } 117 - cursor = sti_plane_init(drm_dev, plane, 1, 118 - DRM_PLANE_TYPE_CURSOR); 119 - plane_id++; 120 121 break; 121 122 case STI_GPD_SUBDEV: 122 - plane = sti_gdp_create(compo->dev, desc[i].id, 123 - compo->regs + desc[i].offset); 124 - if (!plane) { 123 + primary = sti_gdp_create(drm_dev, compo->dev, 124 + desc[i].id, 125 + compo->regs + desc[i].offset, 126 + (1 << mixer_id) - 1, 127 + plane_type); 128 + if (!primary) { 125 129 DRM_ERROR("Can't create GDP plane\n"); 126 130 break; 127 131 } 128 - primary = sti_plane_init(drm_dev, plane, 129 - (1 << mixer_id) - 1, 130 - plane_type); 131 - plane_id++; 132 132 break; 133 133 default: 134 134 DRM_ERROR("Unknown subdev compoment type\n"); ··· 146 150 drm_vblank_init(drm_dev, crtc_id); 147 151 /* Allow usage of vblank without having to call drm_irq_install */ 148 152 drm_dev->irq_enabled = 1; 149 - 150 - DRM_DEBUG_DRIVER("Initialized %d DRM CRTC(s) and %d DRM plane(s)\n", 151 - crtc_id, plane_id); 152 - 
DRM_DEBUG_DRIVER("DRM plane(s) for VID/VDP not created yet\n"); 153 153 154 154 return 0; 155 155 }
+95 -38
drivers/gpu/drm/sti/sti_crtc.c
··· 17 17 #include "sti_compositor.h" 18 18 #include "sti_crtc.h" 19 19 #include "sti_drv.h" 20 + #include "sti_vid.h" 20 21 #include "sti_vtg.h" 21 22 22 - static void sti_crtc_dpms(struct drm_crtc *crtc, int mode) 23 - { 24 - DRM_DEBUG_KMS("\n"); 25 - } 26 - 27 - static void sti_crtc_prepare(struct drm_crtc *crtc) 23 + static void sti_crtc_enable(struct drm_crtc *crtc) 28 24 { 29 25 struct sti_mixer *mixer = to_sti_mixer(crtc); 30 26 struct device *dev = mixer->dev; 31 27 struct sti_compositor *compo = dev_get_drvdata(dev); 32 28 33 - mixer->enabled = true; 29 + DRM_DEBUG_DRIVER("\n"); 30 + 31 + mixer->status = STI_MIXER_READY; 34 32 35 33 /* Prepare and enable the compo IP clock */ 36 34 if (mixer->id == STI_MIXER_MAIN) { ··· 39 41 DRM_INFO("Failed to prepare/enable compo_aux clk\n"); 40 42 } 41 43 42 - sti_mixer_clear_all_planes(mixer); 44 + drm_crtc_vblank_on(crtc); 43 45 } 44 46 45 - static void sti_crtc_commit(struct drm_crtc *crtc) 47 + static void sti_crtc_disabling(struct drm_crtc *crtc) 46 48 { 47 49 struct sti_mixer *mixer = to_sti_mixer(crtc); 48 - struct device *dev = mixer->dev; 49 - struct sti_compositor *compo = dev_get_drvdata(dev); 50 - struct sti_plane *plane; 51 50 52 - if ((!mixer || !compo)) { 53 - DRM_ERROR("Can't find mixer or compositor)\n"); 54 - return; 55 - } 51 + DRM_DEBUG_DRIVER("\n"); 56 52 57 - /* get GDP which is reserved to the CRTC FB */ 58 - plane = to_sti_plane(crtc->primary); 59 - if (!plane) 60 - DRM_ERROR("Can't find CRTC dedicated plane (GDP0)\n"); 61 - 62 - /* Enable plane on mixer */ 63 - if (sti_mixer_set_plane_status(mixer, plane, true)) 64 - DRM_ERROR("Cannot enable plane at mixer\n"); 65 - 66 - drm_crtc_vblank_on(crtc); 53 + mixer->status = STI_MIXER_DISABLING; 67 54 } 68 55 69 56 static bool sti_crtc_mode_fixup(struct drm_crtc *crtc, ··· 116 133 struct device *dev = mixer->dev; 117 134 struct sti_compositor *compo = dev_get_drvdata(dev); 118 135 119 - if (!mixer->enabled) 120 - return; 121 - 122 136 
DRM_DEBUG_KMS("CRTC:%d (%s)\n", crtc->base.id, sti_mixer_to_str(mixer)); 123 137 124 138 /* Disable Background */ ··· 132 152 clk_disable_unprepare(compo->clk_compo_aux); 133 153 } 134 154 135 - mixer->enabled = false; 155 + mixer->status = STI_MIXER_DISABLED; 136 156 } 137 157 138 158 static void 139 159 sti_crtc_mode_set_nofb(struct drm_crtc *crtc) 140 160 { 141 - sti_crtc_prepare(crtc); 161 + sti_crtc_enable(crtc); 142 162 sti_crtc_mode_set(crtc, &crtc->state->adjusted_mode); 143 163 } 144 164 ··· 158 178 159 179 static void sti_crtc_atomic_flush(struct drm_crtc *crtc) 160 180 { 181 + struct drm_device *drm_dev = crtc->dev; 182 + struct sti_mixer *mixer = to_sti_mixer(crtc); 183 + struct sti_compositor *compo = dev_get_drvdata(mixer->dev); 184 + struct drm_plane *p; 185 + 186 + DRM_DEBUG_DRIVER("\n"); 187 + 188 + /* perform plane actions */ 189 + list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) { 190 + struct sti_plane *plane = to_sti_plane(p); 191 + 192 + switch (plane->status) { 193 + case STI_PLANE_UPDATED: 194 + /* update planes tag as updated */ 195 + DRM_DEBUG_DRIVER("update plane %s\n", 196 + sti_plane_to_str(plane)); 197 + 198 + if (sti_mixer_set_plane_depth(mixer, plane)) { 199 + DRM_ERROR("Cannot set plane %s depth\n", 200 + sti_plane_to_str(plane)); 201 + break; 202 + } 203 + 204 + if (sti_mixer_set_plane_status(mixer, plane, true)) { 205 + DRM_ERROR("Cannot enable plane %s at mixer\n", 206 + sti_plane_to_str(plane)); 207 + break; 208 + } 209 + 210 + /* if plane is HQVDP_0 then commit the vid[0] */ 211 + if (plane->desc == STI_HQVDP_0) 212 + sti_vid_commit(compo->vid[0], p->state); 213 + 214 + plane->status = STI_PLANE_READY; 215 + 216 + break; 217 + case STI_PLANE_DISABLING: 218 + /* disabling sequence for planes tag as disabling */ 219 + DRM_DEBUG_DRIVER("disable plane %s from mixer\n", 220 + sti_plane_to_str(plane)); 221 + 222 + if (sti_mixer_set_plane_status(mixer, plane, false)) { 223 + DRM_ERROR("Cannot disable plane %s at 
mixer\n", 224 + sti_plane_to_str(plane)); 225 + continue; 226 + } 227 + 228 + if (plane->desc == STI_CURSOR) 229 + /* tag plane status for disabled */ 230 + plane->status = STI_PLANE_DISABLED; 231 + else 232 + /* tag plane status for flushing */ 233 + plane->status = STI_PLANE_FLUSHING; 234 + 235 + /* if plane is HQVDP_0 then disable the vid[0] */ 236 + if (plane->desc == STI_HQVDP_0) 237 + sti_vid_disable(compo->vid[0]); 238 + 239 + break; 240 + default: 241 + /* Other status case are not handled */ 242 + break; 243 + } 244 + } 161 245 } 162 246 163 247 static struct drm_crtc_helper_funcs sti_crtc_helper_funcs = { 164 - .dpms = sti_crtc_dpms, 165 - .prepare = sti_crtc_prepare, 166 - .commit = sti_crtc_commit, 248 + .enable = sti_crtc_enable, 249 + .disable = sti_crtc_disabling, 167 250 .mode_fixup = sti_crtc_mode_fixup, 168 251 .mode_set = drm_helper_crtc_mode_set, 169 252 .mode_set_nofb = sti_crtc_mode_set_nofb, 170 253 .mode_set_base = drm_helper_crtc_mode_set_base, 171 - .disable = sti_crtc_disable, 172 254 .atomic_begin = sti_crtc_atomic_begin, 173 255 .atomic_flush = sti_crtc_atomic_flush, 174 256 }; ··· 279 237 } 280 238 spin_unlock_irqrestore(&drm_dev->event_lock, flags); 281 239 240 + if (compo->mixer[*crtc]->status == STI_MIXER_DISABLING) { 241 + struct drm_plane *p; 242 + 243 + /* Disable mixer only if all overlay planes (GDP and VDP) 244 + * are disabled */ 245 + list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) { 246 + struct sti_plane *plane = to_sti_plane(p); 247 + 248 + if ((plane->desc & STI_PLANE_TYPE_MASK) <= STI_VDP) 249 + if (plane->status != STI_PLANE_DISABLED) 250 + return 0; 251 + } 252 + sti_crtc_disable(&compo->mixer[*crtc]->drm_crtc); 253 + } 254 + 282 255 return 0; 283 256 } 284 257 ··· 316 259 } 317 260 EXPORT_SYMBOL(sti_crtc_enable_vblank); 318 261 319 - void sti_crtc_disable_vblank(struct drm_device *dev, int crtc) 262 + void sti_crtc_disable_vblank(struct drm_device *drm_dev, int crtc) 320 263 { 321 - struct sti_private 
*priv = dev->dev_private; 264 + struct sti_private *priv = drm_dev->dev_private; 322 265 struct sti_compositor *compo = priv->compo; 323 266 struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb; 324 267 ··· 330 273 331 274 /* free the resources of the pending requests */ 332 275 if (compo->mixer[crtc]->pending_event) { 333 - drm_vblank_put(dev, crtc); 276 + drm_vblank_put(drm_dev, crtc); 334 277 compo->mixer[crtc]->pending_event = NULL; 335 278 } 336 279 }
+129 -86
drivers/gpu/drm/sti/sti_cursor.c
··· 7 7 */ 8 8 #include <drm/drmP.h> 9 9 10 + #include <drm/drm_atomic_helper.h> 11 + #include <drm/drm_fb_cma_helper.h> 12 + #include <drm/drm_gem_cma_helper.h> 13 + #include <drm/drm_plane_helper.h> 14 + 15 + #include "sti_compositor.h" 10 16 #include "sti_cursor.h" 11 17 #include "sti_plane.h" 12 18 #include "sti_vtg.h" ··· 48 42 /** 49 43 * STI Cursor structure 50 44 * 51 - * @sti_plane: sti_plane structure 52 - * @dev: driver device 53 - * @regs: cursor registers 54 - * @width: cursor width 55 - * @height: cursor height 56 - * @clut: color look up table 57 - * @clut_paddr: color look up table physical address 58 - * @pixmap: pixmap dma buffer (clut8-format cursor) 45 + * @sti_plane: sti_plane structure 46 + * @dev: driver device 47 + * @regs: cursor registers 48 + * @width: cursor width 49 + * @height: cursor height 50 + * @clut: color look up table 51 + * @clut_paddr: color look up table physical address 52 + * @pixmap: pixmap dma buffer (clut8-format cursor) 59 53 */ 60 54 struct sti_cursor { 61 55 struct sti_plane plane; ··· 74 68 75 69 #define to_sti_cursor(x) container_of(x, struct sti_cursor, plane) 76 70 77 - static const uint32_t *sti_cursor_get_formats(struct sti_plane *plane) 71 + static void sti_cursor_argb8888_to_clut8(struct sti_cursor *cursor, u32 *src) 78 72 { 79 - return cursor_supported_formats; 80 - } 81 - 82 - static unsigned int sti_cursor_get_nb_formats(struct sti_plane *plane) 83 - { 84 - return ARRAY_SIZE(cursor_supported_formats); 85 - } 86 - 87 - static void sti_cursor_argb8888_to_clut8(struct sti_plane *plane) 88 - { 89 - struct sti_cursor *cursor = to_sti_cursor(plane); 90 - u32 *src = plane->vaddr; 91 73 u8 *dst = cursor->pixmap.base; 92 74 unsigned int i, j; 93 75 u32 a, r, g, b; ··· 94 100 } 95 101 } 96 102 97 - static int sti_cursor_prepare_plane(struct sti_plane *plane, bool first_prepare) 103 + static void sti_cursor_init(struct sti_cursor *cursor) 98 104 { 105 + unsigned short *base = cursor->clut; 106 + unsigned int a, r, g, 
b; 107 + 108 + /* Assign CLUT values, ARGB444 format */ 109 + for (a = 0; a < 4; a++) 110 + for (r = 0; r < 4; r++) 111 + for (g = 0; g < 4; g++) 112 + for (b = 0; b < 4; b++) 113 + *base++ = (a * 5) << 12 | 114 + (r * 5) << 8 | 115 + (g * 5) << 4 | 116 + (b * 5); 117 + } 118 + 119 + static void sti_cursor_atomic_update(struct drm_plane *drm_plane, 120 + struct drm_plane_state *oldstate) 121 + { 122 + struct drm_plane_state *state = drm_plane->state; 123 + struct sti_plane *plane = to_sti_plane(drm_plane); 99 124 struct sti_cursor *cursor = to_sti_cursor(plane); 100 - struct drm_display_mode *mode = plane->mode; 125 + struct drm_crtc *crtc = state->crtc; 126 + struct sti_mixer *mixer = to_sti_mixer(crtc); 127 + struct drm_framebuffer *fb = state->fb; 128 + struct drm_display_mode *mode = &crtc->mode; 129 + int dst_x = state->crtc_x; 130 + int dst_y = state->crtc_y; 131 + int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x); 132 + int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y); 133 + /* src_x are in 16.16 format */ 134 + int src_w = state->src_w >> 16; 135 + int src_h = state->src_h >> 16; 136 + bool first_prepare = plane->status == STI_PLANE_DISABLED ? 
true : false; 137 + struct drm_gem_cma_object *cma_obj; 101 138 u32 y, x; 102 139 u32 val; 103 140 104 - DRM_DEBUG_DRIVER("\n"); 141 + DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n", 142 + crtc->base.id, sti_mixer_to_str(mixer), 143 + drm_plane->base.id, sti_plane_to_str(plane)); 144 + DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", dst_w, dst_h, dst_x, dst_y); 105 145 106 - dev_dbg(cursor->dev, "%s %s\n", __func__, sti_plane_to_str(plane)); 146 + dev_dbg(cursor->dev, "%s %s\n", __func__, 147 + sti_plane_to_str(plane)); 107 148 108 - if (plane->src_w < STI_CURS_MIN_SIZE || 109 - plane->src_h < STI_CURS_MIN_SIZE || 110 - plane->src_w > STI_CURS_MAX_SIZE || 111 - plane->src_h > STI_CURS_MAX_SIZE) { 149 + if (src_w < STI_CURS_MIN_SIZE || 150 + src_h < STI_CURS_MIN_SIZE || 151 + src_w > STI_CURS_MAX_SIZE || 152 + src_h > STI_CURS_MAX_SIZE) { 112 153 DRM_ERROR("Invalid cursor size (%dx%d)\n", 113 - plane->src_w, plane->src_h); 114 - return -EINVAL; 154 + src_w, src_h); 155 + return; 115 156 } 116 157 117 158 /* If the cursor size has changed, re-allocated the pixmap */ 118 159 if (!cursor->pixmap.base || 119 - (cursor->width != plane->src_w) || 120 - (cursor->height != plane->src_h)) { 121 - cursor->width = plane->src_w; 122 - cursor->height = plane->src_h; 160 + (cursor->width != src_w) || 161 + (cursor->height != src_h)) { 162 + cursor->width = src_w; 163 + cursor->height = src_h; 123 164 124 165 if (cursor->pixmap.base) 125 166 dma_free_writecombine(cursor->dev, ··· 170 141 GFP_KERNEL | GFP_DMA); 171 142 if (!cursor->pixmap.base) { 172 143 DRM_ERROR("Failed to allocate memory for pixmap\n"); 173 - return -ENOMEM; 144 + return; 174 145 } 175 146 } 176 147 148 + cma_obj = drm_fb_cma_get_gem_obj(fb, 0); 149 + if (!cma_obj) { 150 + DRM_ERROR("Can't get CMA GEM object for fb\n"); 151 + return; 152 + } 153 + 177 154 /* Convert ARGB8888 to CLUT8 */ 178 - sti_cursor_argb8888_to_clut8(plane); 155 + sti_cursor_argb8888_to_clut8(cursor, (u32 *)cma_obj->vaddr); 179 156 180 157 /* AWS and 
AWE depend on the mode */ 181 158 y = sti_vtg_get_line_number(*mode, 0); ··· 199 164 writel(CUR_CTL_CLUT_UPDATE, cursor->regs + CUR_CTL); 200 165 } 201 166 202 - return 0; 203 - } 204 - 205 - static int sti_cursor_commit_plane(struct sti_plane *plane) 206 - { 207 - struct sti_cursor *cursor = to_sti_cursor(plane); 208 - struct drm_display_mode *mode = plane->mode; 209 - u32 ydo, xdo; 210 - 211 - dev_dbg(cursor->dev, "%s %s\n", __func__, sti_plane_to_str(plane)); 212 - 213 167 /* Set memory location, size, and position */ 214 168 writel(cursor->pixmap.paddr, cursor->regs + CUR_PML); 215 169 writel(cursor->width, cursor->regs + CUR_PMP); 216 170 writel(cursor->height << 16 | cursor->width, cursor->regs + CUR_SIZE); 217 171 218 - ydo = sti_vtg_get_line_number(*mode, plane->dst_y); 219 - xdo = sti_vtg_get_pixel_number(*mode, plane->dst_y); 220 - writel((ydo << 16) | xdo, cursor->regs + CUR_VPO); 172 + y = sti_vtg_get_line_number(*mode, dst_y); 173 + x = sti_vtg_get_pixel_number(*mode, dst_y); 174 + writel((y << 16) | x, cursor->regs + CUR_VPO); 221 175 222 - return 0; 176 + plane->status = STI_PLANE_UPDATED; 223 177 } 224 178 225 - static int sti_cursor_disable_plane(struct sti_plane *plane) 179 + static void sti_cursor_atomic_disable(struct drm_plane *drm_plane, 180 + struct drm_plane_state *oldstate) 226 181 { 227 - return 0; 182 + struct sti_plane *plane = to_sti_plane(drm_plane); 183 + struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc); 184 + 185 + if (!drm_plane->crtc) { 186 + DRM_DEBUG_DRIVER("drm plane:%d not enabled\n", 187 + drm_plane->base.id); 188 + return; 189 + } 190 + 191 + DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n", 192 + drm_plane->crtc->base.id, sti_mixer_to_str(mixer), 193 + drm_plane->base.id, sti_plane_to_str(plane)); 194 + 195 + plane->status = STI_PLANE_DISABLING; 228 196 } 229 197 230 - static void sti_cursor_init(struct sti_cursor *cursor) 231 - { 232 - unsigned short *base = cursor->clut; 233 - unsigned int a, r, g, b; 234 - 235 - 
/* Assign CLUT values, ARGB444 format */ 236 - for (a = 0; a < 4; a++) 237 - for (r = 0; r < 4; r++) 238 - for (g = 0; g < 4; g++) 239 - for (b = 0; b < 4; b++) 240 - *base++ = (a * 5) << 12 | 241 - (r * 5) << 8 | 242 - (g * 5) << 4 | 243 - (b * 5); 244 - } 245 - 246 - static const struct sti_plane_funcs cursor_plane_ops = { 247 - .get_formats = sti_cursor_get_formats, 248 - .get_nb_formats = sti_cursor_get_nb_formats, 249 - .prepare = sti_cursor_prepare_plane, 250 - .commit = sti_cursor_commit_plane, 251 - .disable = sti_cursor_disable_plane, 198 + static const struct drm_plane_helper_funcs sti_cursor_helpers_funcs = { 199 + .atomic_update = sti_cursor_atomic_update, 200 + .atomic_disable = sti_cursor_atomic_disable, 252 201 }; 253 202 254 - struct sti_plane *sti_cursor_create(struct device *dev, int desc, 255 - void __iomem *baseaddr) 203 + struct drm_plane *sti_cursor_create(struct drm_device *drm_dev, 204 + struct device *dev, int desc, 205 + void __iomem *baseaddr, 206 + unsigned int possible_crtcs) 256 207 { 257 208 struct sti_cursor *cursor; 209 + size_t size; 210 + int res; 258 211 259 212 cursor = devm_kzalloc(dev, sizeof(*cursor), GFP_KERNEL); 260 213 if (!cursor) { ··· 251 228 } 252 229 253 230 /* Allocate clut buffer */ 254 - cursor->clut = dma_alloc_writecombine(dev, 255 - 0x100 * sizeof(unsigned short), 256 - &cursor->clut_paddr, 257 - GFP_KERNEL | GFP_DMA); 231 + size = 0x100 * sizeof(unsigned short); 232 + cursor->clut = dma_alloc_writecombine(dev, size, &cursor->clut_paddr, 233 + GFP_KERNEL | GFP_DMA); 258 234 259 235 if (!cursor->clut) { 260 236 DRM_ERROR("Failed to allocate memory for cursor clut\n"); 261 - devm_kfree(dev, cursor); 262 - return NULL; 237 + goto err_clut; 263 238 } 264 239 265 240 cursor->dev = dev; 266 241 cursor->regs = baseaddr; 267 242 cursor->plane.desc = desc; 268 - cursor->plane.ops = &cursor_plane_ops; 243 + cursor->plane.status = STI_PLANE_DISABLED; 269 244 270 245 sti_cursor_init(cursor); 271 246 272 - return 
&cursor->plane; 247 + res = drm_universal_plane_init(drm_dev, &cursor->plane.drm_plane, 248 + possible_crtcs, 249 + &sti_plane_helpers_funcs, 250 + cursor_supported_formats, 251 + ARRAY_SIZE(cursor_supported_formats), 252 + DRM_PLANE_TYPE_CURSOR); 253 + if (res) { 254 + DRM_ERROR("Failed to initialize universal plane\n"); 255 + goto err_plane; 256 + } 257 + 258 + drm_plane_helper_add(&cursor->plane.drm_plane, 259 + &sti_cursor_helpers_funcs); 260 + 261 + sti_plane_init_property(&cursor->plane, DRM_PLANE_TYPE_CURSOR); 262 + 263 + return &cursor->plane.drm_plane; 264 + 265 + err_plane: 266 + dma_free_writecombine(dev, size, cursor->clut, cursor->clut_paddr); 267 + err_clut: 268 + devm_kfree(dev, cursor); 269 + return NULL; 273 270 }
+4 -2
drivers/gpu/drm/sti/sti_cursor.h
··· 7 7 #ifndef _STI_CURSOR_H_ 8 8 #define _STI_CURSOR_H_ 9 9 10 - struct sti_plane *sti_cursor_create(struct device *dev, int desc, 11 - void __iomem *baseaddr); 10 + struct drm_plane *sti_cursor_create(struct drm_device *drm_dev, 11 + struct device *dev, int desc, 12 + void __iomem *baseaddr, 13 + unsigned int possible_crtcs); 12 14 13 15 #endif
+274 -219
drivers/gpu/drm/sti/sti_gdp.c
··· 9 9 #include <linux/clk.h> 10 10 #include <linux/dma-mapping.h> 11 11 12 + #include <drm/drm_fb_cma_helper.h> 13 + #include <drm/drm_gem_cma_helper.h> 14 + 12 15 #include "sti_compositor.h" 13 16 #include "sti_gdp.h" 14 17 #include "sti_plane.h" ··· 29 26 #define GDP_XBGR8888 (GDP_RGB888_32 | BIGNOTLITTLE | ALPHASWITCH) 30 27 #define GDP_ARGB8565 0x04 31 28 #define GDP_ARGB8888 0x05 32 - #define GDP_ABGR8888 (GDP_ARGB8888 | BIGNOTLITTLE | ALPHASWITCH) 29 + #define GDP_ABGR8888 (GDP_ARGB8888 | BIGNOTLITTLE | ALPHASWITCH) 33 30 #define GDP_ARGB1555 0x06 34 31 #define GDP_ARGB4444 0x07 35 32 #define GDP_CLUT8 0x0B ··· 56 53 #define GAM_GDP_PPT_IGNORE (BIT(1) | BIT(0)) 57 54 #define GAM_GDP_SIZE_MAX 0x7FF 58 55 59 - #define GDP_NODE_NB_BANK 2 60 - #define GDP_NODE_PER_FIELD 2 56 + #define GDP_NODE_NB_BANK 2 57 + #define GDP_NODE_PER_FIELD 2 61 58 62 59 struct sti_gdp_node { 63 60 u32 gam_gdp_ctl; ··· 127 124 DRM_FORMAT_C8, 128 125 }; 129 126 130 - static const uint32_t *sti_gdp_get_formats(struct sti_plane *plane) 131 - { 132 - return gdp_supported_formats; 133 - } 134 - 135 - static unsigned int sti_gdp_get_nb_formats(struct sti_plane *plane) 136 - { 137 - return ARRAY_SIZE(gdp_supported_formats); 138 - } 139 - 140 127 static int sti_gdp_fourcc2format(int fourcc) 141 128 { 142 129 switch (fourcc) { ··· 172 179 173 180 /** 174 181 * sti_gdp_get_free_nodes 175 - * @plane: gdp plane 182 + * @gdp: gdp pointer 176 183 * 177 184 * Look for a GDP node list that is not currently read by the HW. 
178 185 * 179 186 * RETURNS: 180 187 * Pointer to the free GDP node list 181 188 */ 182 - static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_plane *plane) 189 + static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_gdp *gdp) 183 190 { 184 191 int hw_nvn; 185 - struct sti_gdp *gdp = to_sti_gdp(plane); 186 192 unsigned int i; 187 193 188 194 hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET); ··· 195 203 196 204 /* in hazardious cases restart with the first node */ 197 205 DRM_ERROR("inconsistent NVN for %s: 0x%08X\n", 198 - sti_plane_to_str(plane), hw_nvn); 206 + sti_plane_to_str(&gdp->plane), hw_nvn); 199 207 200 208 end: 201 209 return &gdp->node_list[0]; ··· 203 211 204 212 /** 205 213 * sti_gdp_get_current_nodes 206 - * @plane: GDP plane 214 + * @gdp: gdp pointer 207 215 * 208 216 * Look for GDP nodes that are currently read by the HW. 209 217 * ··· 211 219 * Pointer to the current GDP node list 212 220 */ 213 221 static 214 - struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_plane *plane) 222 + struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_gdp *gdp) 215 223 { 216 224 int hw_nvn; 217 - struct sti_gdp *gdp = to_sti_gdp(plane); 218 225 unsigned int i; 219 226 220 227 hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET); ··· 227 236 228 237 end: 229 238 DRM_DEBUG_DRIVER("Warning, NVN 0x%08X for %s does not match any node\n", 230 - hw_nvn, sti_plane_to_str(plane)); 239 + hw_nvn, sti_plane_to_str(&gdp->plane)); 231 240 232 241 return NULL; 233 242 } 234 243 235 244 /** 236 - * sti_gdp_prepare 237 - * @plane: gdp plane 238 - * @first_prepare: true if it is the first time this function is called 239 - * 240 - * Update the free GDP node list according to the plane properties. 241 - * 242 - * RETURNS: 243 - * 0 on success. 
244 - */ 245 - static int sti_gdp_prepare(struct sti_plane *plane, bool first_prepare) 246 - { 247 - struct sti_gdp_node_list *list; 248 - struct sti_gdp_node *top_field, *btm_field; 249 - struct drm_display_mode *mode = plane->mode; 250 - struct sti_gdp *gdp = to_sti_gdp(plane); 251 - struct device *dev = gdp->dev; 252 - struct sti_compositor *compo = dev_get_drvdata(dev); 253 - int format; 254 - unsigned int depth, bpp; 255 - int rate = mode->clock * 1000; 256 - int res; 257 - u32 ydo, xdo, yds, xds; 258 - 259 - list = sti_gdp_get_free_nodes(plane); 260 - top_field = list->top_field; 261 - btm_field = list->btm_field; 262 - 263 - dev_dbg(dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__, 264 - sti_plane_to_str(plane), top_field, btm_field); 265 - 266 - /* Build the top field from plane params */ 267 - top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE; 268 - top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC; 269 - format = sti_gdp_fourcc2format(plane->format); 270 - if (format == -1) { 271 - DRM_ERROR("Format not supported by GDP %.4s\n", 272 - (char *)&plane->format); 273 - return 1; 274 - } 275 - top_field->gam_gdp_ctl |= format; 276 - top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format); 277 - top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE; 278 - 279 - /* pixel memory location */ 280 - drm_fb_get_bpp_depth(plane->format, &depth, &bpp); 281 - top_field->gam_gdp_pml = (u32)plane->paddr + plane->offsets[0]; 282 - top_field->gam_gdp_pml += plane->src_x * (bpp >> 3); 283 - top_field->gam_gdp_pml += plane->src_y * plane->pitches[0]; 284 - 285 - /* input parameters */ 286 - top_field->gam_gdp_pmp = plane->pitches[0]; 287 - top_field->gam_gdp_size = 288 - clamp_val(plane->src_h, 0, GAM_GDP_SIZE_MAX) << 16 | 289 - clamp_val(plane->src_w, 0, GAM_GDP_SIZE_MAX); 290 - 291 - /* output parameters */ 292 - ydo = sti_vtg_get_line_number(*mode, plane->dst_y); 293 - yds = sti_vtg_get_line_number(*mode, plane->dst_y + plane->dst_h - 1); 294 - xdo = sti_vtg_get_pixel_number(*mode, 
plane->dst_x); 295 - xds = sti_vtg_get_pixel_number(*mode, plane->dst_x + plane->dst_w - 1); 296 - top_field->gam_gdp_vpo = (ydo << 16) | xdo; 297 - top_field->gam_gdp_vps = (yds << 16) | xds; 298 - 299 - /* Same content and chained together */ 300 - memcpy(btm_field, top_field, sizeof(*btm_field)); 301 - top_field->gam_gdp_nvn = list->btm_field_paddr; 302 - btm_field->gam_gdp_nvn = list->top_field_paddr; 303 - 304 - /* Interlaced mode */ 305 - if (plane->mode->flags & DRM_MODE_FLAG_INTERLACE) 306 - btm_field->gam_gdp_pml = top_field->gam_gdp_pml + 307 - plane->pitches[0]; 308 - 309 - if (first_prepare) { 310 - /* Register gdp callback */ 311 - if (sti_vtg_register_client(plane->mixer_id == STI_MIXER_MAIN ? 312 - compo->vtg_main : compo->vtg_aux, 313 - &gdp->vtg_field_nb, plane->mixer_id)) { 314 - DRM_ERROR("Cannot register VTG notifier\n"); 315 - return 1; 316 - } 317 - 318 - /* Set and enable gdp clock */ 319 - if (gdp->clk_pix) { 320 - struct clk *clkp; 321 - /* According to the mixer used, the gdp pixel clock 322 - * should have a different parent clock. */ 323 - if (plane->mixer_id == STI_MIXER_MAIN) 324 - clkp = gdp->clk_main_parent; 325 - else 326 - clkp = gdp->clk_aux_parent; 327 - 328 - if (clkp) 329 - clk_set_parent(gdp->clk_pix, clkp); 330 - 331 - res = clk_set_rate(gdp->clk_pix, rate); 332 - if (res < 0) { 333 - DRM_ERROR("Cannot set rate (%dHz) for gdp\n", 334 - rate); 335 - return 1; 336 - } 337 - 338 - if (clk_prepare_enable(gdp->clk_pix)) { 339 - DRM_ERROR("Failed to prepare/enable gdp\n"); 340 - return 1; 341 - } 342 - } 343 - } 344 - 345 - return 0; 346 - } 347 - 348 - /** 349 - * sti_gdp_commit 350 - * @plane: gdp plane 351 - * 352 - * Update the NVN field of the 'right' field of the current GDP node (being 353 - * used by the HW) with the address of the updated ('free') top field GDP node. 
354 - * - In interlaced mode the 'right' field is the bottom field as we update 355 - * frames starting from their top field 356 - * - In progressive mode, we update both bottom and top fields which are 357 - * equal nodes. 358 - * At the next VSYNC, the updated node list will be used by the HW. 359 - * 360 - * RETURNS: 361 - * 0 on success. 362 - */ 363 - static int sti_gdp_commit(struct sti_plane *plane) 364 - { 365 - struct sti_gdp_node_list *updated_list = sti_gdp_get_free_nodes(plane); 366 - struct sti_gdp_node *updated_top_node = updated_list->top_field; 367 - struct sti_gdp_node *updated_btm_node = updated_list->btm_field; 368 - struct sti_gdp *gdp = to_sti_gdp(plane); 369 - u32 dma_updated_top = updated_list->top_field_paddr; 370 - u32 dma_updated_btm = updated_list->btm_field_paddr; 371 - struct sti_gdp_node_list *curr_list = sti_gdp_get_current_nodes(plane); 372 - 373 - dev_dbg(gdp->dev, "%s %s top/btm_node:0x%p/0x%p\n", __func__, 374 - sti_plane_to_str(plane), 375 - updated_top_node, updated_btm_node); 376 - dev_dbg(gdp->dev, "Current NVN:0x%X\n", 377 - readl(gdp->regs + GAM_GDP_NVN_OFFSET)); 378 - dev_dbg(gdp->dev, "Posted buff: %lx current buff: %x\n", 379 - (unsigned long)plane->paddr, 380 - readl(gdp->regs + GAM_GDP_PML_OFFSET)); 381 - 382 - if (curr_list == NULL) { 383 - /* First update or invalid node should directly write in the 384 - * hw register */ 385 - DRM_DEBUG_DRIVER("%s first update (or invalid node)", 386 - sti_plane_to_str(plane)); 387 - 388 - writel(gdp->is_curr_top == true ? 
389 - dma_updated_btm : dma_updated_top, 390 - gdp->regs + GAM_GDP_NVN_OFFSET); 391 - return 0; 392 - } 393 - 394 - if (plane->mode->flags & DRM_MODE_FLAG_INTERLACE) { 395 - if (gdp->is_curr_top == true) { 396 - /* Do not update in the middle of the frame, but 397 - * postpone the update after the bottom field has 398 - * been displayed */ 399 - curr_list->btm_field->gam_gdp_nvn = dma_updated_top; 400 - } else { 401 - /* Direct update to avoid one frame delay */ 402 - writel(dma_updated_top, 403 - gdp->regs + GAM_GDP_NVN_OFFSET); 404 - } 405 - } else { 406 - /* Direct update for progressive to avoid one frame delay */ 407 - writel(dma_updated_top, gdp->regs + GAM_GDP_NVN_OFFSET); 408 - } 409 - 410 - return 0; 411 - } 412 - 413 - /** 414 245 * sti_gdp_disable 415 - * @plane: gdp plane 246 + * @gdp: gdp pointer 416 247 * 417 248 * Disable a GDP. 418 - * 419 - * RETURNS: 420 - * 0 on success. 421 249 */ 422 - static int sti_gdp_disable(struct sti_plane *plane) 250 + static void sti_gdp_disable(struct sti_gdp *gdp) 423 251 { 424 - unsigned int i; 425 - struct sti_gdp *gdp = to_sti_gdp(plane); 252 + struct drm_plane *drm_plane = &gdp->plane.drm_plane; 253 + struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc); 426 254 struct sti_compositor *compo = dev_get_drvdata(gdp->dev); 255 + unsigned int i; 427 256 428 - DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(plane)); 257 + DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(&gdp->plane)); 429 258 430 259 /* Set the nodes as 'to be ignored on mixer' */ 431 260 for (i = 0; i < GDP_NODE_NB_BANK; i++) { ··· 253 442 gdp->node_list[i].btm_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE; 254 443 } 255 444 256 - if (sti_vtg_unregister_client(plane->mixer_id == STI_MIXER_MAIN ? 445 + if (sti_vtg_unregister_client(mixer->id == STI_MIXER_MAIN ? 
257 446 compo->vtg_main : compo->vtg_aux, &gdp->vtg_field_nb)) 258 447 DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n"); 259 448 260 449 if (gdp->clk_pix) 261 450 clk_disable_unprepare(gdp->clk_pix); 262 451 263 - return 0; 452 + gdp->plane.status = STI_PLANE_DISABLED; 264 453 } 265 454 266 455 /** ··· 278 467 unsigned long event, void *data) 279 468 { 280 469 struct sti_gdp *gdp = container_of(nb, struct sti_gdp, vtg_field_nb); 470 + 471 + if (gdp->plane.status == STI_PLANE_FLUSHING) { 472 + /* disable need to be synchronize on vsync event */ 473 + DRM_DEBUG_DRIVER("Vsync event received => disable %s\n", 474 + sti_plane_to_str(&gdp->plane)); 475 + 476 + sti_gdp_disable(gdp); 477 + } 281 478 282 479 switch (event) { 283 480 case VTG_TOP_FIELD_EVENT: ··· 380 561 } 381 562 } 382 563 383 - static const struct sti_plane_funcs gdp_plane_ops = { 384 - .get_formats = sti_gdp_get_formats, 385 - .get_nb_formats = sti_gdp_get_nb_formats, 386 - .prepare = sti_gdp_prepare, 387 - .commit = sti_gdp_commit, 388 - .disable = sti_gdp_disable, 564 + static void sti_gdp_atomic_update(struct drm_plane *drm_plane, 565 + struct drm_plane_state *oldstate) 566 + { 567 + struct drm_plane_state *state = drm_plane->state; 568 + struct sti_plane *plane = to_sti_plane(drm_plane); 569 + struct sti_gdp *gdp = to_sti_gdp(plane); 570 + struct drm_crtc *crtc = state->crtc; 571 + struct sti_compositor *compo = dev_get_drvdata(gdp->dev); 572 + struct drm_framebuffer *fb = state->fb; 573 + bool first_prepare = plane->status == STI_PLANE_DISABLED ? 
true : false; 574 + struct sti_mixer *mixer; 575 + struct drm_display_mode *mode; 576 + int dst_x, dst_y, dst_w, dst_h; 577 + int src_x, src_y, src_w, src_h; 578 + struct drm_gem_cma_object *cma_obj; 579 + struct sti_gdp_node_list *list; 580 + struct sti_gdp_node_list *curr_list; 581 + struct sti_gdp_node *top_field, *btm_field; 582 + u32 dma_updated_top; 583 + u32 dma_updated_btm; 584 + int format; 585 + unsigned int depth, bpp; 586 + u32 ydo, xdo, yds, xds; 587 + int res; 588 + 589 + /* Manage the case where crtc is null (disabled) */ 590 + if (!crtc) 591 + return; 592 + 593 + mixer = to_sti_mixer(crtc); 594 + mode = &crtc->mode; 595 + dst_x = state->crtc_x; 596 + dst_y = state->crtc_y; 597 + dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x); 598 + dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y); 599 + /* src_x are in 16.16 format */ 600 + src_x = state->src_x >> 16; 601 + src_y = state->src_y >> 16; 602 + src_w = state->src_w >> 16; 603 + src_h = state->src_h >> 16; 604 + 605 + DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n", 606 + crtc->base.id, sti_mixer_to_str(mixer), 607 + drm_plane->base.id, sti_plane_to_str(plane)); 608 + DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n", 609 + sti_plane_to_str(plane), 610 + dst_w, dst_h, dst_x, dst_y, 611 + src_w, src_h, src_x, src_y); 612 + 613 + list = sti_gdp_get_free_nodes(gdp); 614 + top_field = list->top_field; 615 + btm_field = list->btm_field; 616 + 617 + dev_dbg(gdp->dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__, 618 + sti_plane_to_str(plane), top_field, btm_field); 619 + 620 + /* build the top field */ 621 + top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE; 622 + top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC; 623 + format = sti_gdp_fourcc2format(fb->pixel_format); 624 + if (format == -1) { 625 + DRM_ERROR("Format not supported by GDP %.4s\n", 626 + (char *)&fb->pixel_format); 627 + return; 628 + } 629 + top_field->gam_gdp_ctl |= format; 630 + top_field->gam_gdp_ctl 
|= sti_gdp_get_alpharange(format); 631 + top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE; 632 + 633 + cma_obj = drm_fb_cma_get_gem_obj(fb, 0); 634 + if (!cma_obj) { 635 + DRM_ERROR("Can't get CMA GEM object for fb\n"); 636 + return; 637 + } 638 + 639 + DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id, 640 + (char *)&fb->pixel_format, 641 + (unsigned long)cma_obj->paddr); 642 + 643 + /* pixel memory location */ 644 + drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp); 645 + top_field->gam_gdp_pml = (u32)cma_obj->paddr + fb->offsets[0]; 646 + top_field->gam_gdp_pml += src_x * (bpp >> 3); 647 + top_field->gam_gdp_pml += src_y * fb->pitches[0]; 648 + 649 + /* input parameters */ 650 + top_field->gam_gdp_pmp = fb->pitches[0]; 651 + top_field->gam_gdp_size = clamp_val(src_h, 0, GAM_GDP_SIZE_MAX) << 16 | 652 + clamp_val(src_w, 0, GAM_GDP_SIZE_MAX); 653 + 654 + /* output parameters */ 655 + ydo = sti_vtg_get_line_number(*mode, dst_y); 656 + yds = sti_vtg_get_line_number(*mode, dst_y + dst_h - 1); 657 + xdo = sti_vtg_get_pixel_number(*mode, dst_x); 658 + xds = sti_vtg_get_pixel_number(*mode, dst_x + dst_w - 1); 659 + top_field->gam_gdp_vpo = (ydo << 16) | xdo; 660 + top_field->gam_gdp_vps = (yds << 16) | xds; 661 + 662 + /* Same content and chained together */ 663 + memcpy(btm_field, top_field, sizeof(*btm_field)); 664 + top_field->gam_gdp_nvn = list->btm_field_paddr; 665 + btm_field->gam_gdp_nvn = list->top_field_paddr; 666 + 667 + /* Interlaced mode */ 668 + if (mode->flags & DRM_MODE_FLAG_INTERLACE) 669 + btm_field->gam_gdp_pml = top_field->gam_gdp_pml + 670 + fb->pitches[0]; 671 + 672 + if (first_prepare) { 673 + /* Register gdp callback */ 674 + if (sti_vtg_register_client(mixer->id == STI_MIXER_MAIN ? 
675 + compo->vtg_main : compo->vtg_aux, 676 + &gdp->vtg_field_nb, mixer->id)) { 677 + DRM_ERROR("Cannot register VTG notifier\n"); 678 + return; 679 + } 680 + 681 + /* Set and enable gdp clock */ 682 + if (gdp->clk_pix) { 683 + struct clk *clkp; 684 + int rate = mode->clock * 1000; 685 + 686 + /* According to the mixer used, the gdp pixel clock 687 + * should have a different parent clock. */ 688 + if (mixer->id == STI_MIXER_MAIN) 689 + clkp = gdp->clk_main_parent; 690 + else 691 + clkp = gdp->clk_aux_parent; 692 + 693 + if (clkp) 694 + clk_set_parent(gdp->clk_pix, clkp); 695 + 696 + res = clk_set_rate(gdp->clk_pix, rate); 697 + if (res < 0) { 698 + DRM_ERROR("Cannot set rate (%dHz) for gdp\n", 699 + rate); 700 + return; 701 + } 702 + 703 + if (clk_prepare_enable(gdp->clk_pix)) { 704 + DRM_ERROR("Failed to prepare/enable gdp\n"); 705 + return; 706 + } 707 + } 708 + } 709 + 710 + /* Update the NVN field of the 'right' field of the current GDP node 711 + * (being used by the HW) with the address of the updated ('free') top 712 + * field GDP node. 713 + * - In interlaced mode the 'right' field is the bottom field as we 714 + * update frames starting from their top field 715 + * - In progressive mode, we update both bottom and top fields which 716 + * are equal nodes. 717 + * At the next VSYNC, the updated node list will be used by the HW. 
718 + */ 719 + curr_list = sti_gdp_get_current_nodes(gdp); 720 + dma_updated_top = list->top_field_paddr; 721 + dma_updated_btm = list->btm_field_paddr; 722 + 723 + dev_dbg(gdp->dev, "Current NVN:0x%X\n", 724 + readl(gdp->regs + GAM_GDP_NVN_OFFSET)); 725 + dev_dbg(gdp->dev, "Posted buff: %lx current buff: %x\n", 726 + (unsigned long)cma_obj->paddr, 727 + readl(gdp->regs + GAM_GDP_PML_OFFSET)); 728 + 729 + if (!curr_list) { 730 + /* First update or invalid node should directly write in the 731 + * hw register */ 732 + DRM_DEBUG_DRIVER("%s first update (or invalid node)", 733 + sti_plane_to_str(plane)); 734 + 735 + writel(gdp->is_curr_top ? 736 + dma_updated_btm : dma_updated_top, 737 + gdp->regs + GAM_GDP_NVN_OFFSET); 738 + goto end; 739 + } 740 + 741 + if (mode->flags & DRM_MODE_FLAG_INTERLACE) { 742 + if (gdp->is_curr_top) { 743 + /* Do not update in the middle of the frame, but 744 + * postpone the update after the bottom field has 745 + * been displayed */ 746 + curr_list->btm_field->gam_gdp_nvn = dma_updated_top; 747 + } else { 748 + /* Direct update to avoid one frame delay */ 749 + writel(dma_updated_top, 750 + gdp->regs + GAM_GDP_NVN_OFFSET); 751 + } 752 + } else { 753 + /* Direct update for progressive to avoid one frame delay */ 754 + writel(dma_updated_top, gdp->regs + GAM_GDP_NVN_OFFSET); 755 + } 756 + 757 + end: 758 + plane->status = STI_PLANE_UPDATED; 759 + } 760 + 761 + static void sti_gdp_atomic_disable(struct drm_plane *drm_plane, 762 + struct drm_plane_state *oldstate) 763 + { 764 + struct sti_plane *plane = to_sti_plane(drm_plane); 765 + struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc); 766 + 767 + if (!drm_plane->crtc) { 768 + DRM_DEBUG_DRIVER("drm plane:%d not enabled\n", 769 + drm_plane->base.id); 770 + return; 771 + } 772 + 773 + DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n", 774 + drm_plane->crtc->base.id, sti_mixer_to_str(mixer), 775 + drm_plane->base.id, sti_plane_to_str(plane)); 776 + 777 + plane->status = STI_PLANE_DISABLING; 
778 + } 779 + 780 + static const struct drm_plane_helper_funcs sti_gdp_helpers_funcs = { 781 + .atomic_update = sti_gdp_atomic_update, 782 + .atomic_disable = sti_gdp_atomic_disable, 389 783 }; 390 784 391 - struct sti_plane *sti_gdp_create(struct device *dev, int desc, 392 - void __iomem *baseaddr) 785 + struct drm_plane *sti_gdp_create(struct drm_device *drm_dev, 786 + struct device *dev, int desc, 787 + void __iomem *baseaddr, 788 + unsigned int possible_crtcs, 789 + enum drm_plane_type type) 393 790 { 394 791 struct sti_gdp *gdp; 792 + int res; 395 793 396 794 gdp = devm_kzalloc(dev, sizeof(*gdp), GFP_KERNEL); 397 795 if (!gdp) { ··· 619 583 gdp->dev = dev; 620 584 gdp->regs = baseaddr; 621 585 gdp->plane.desc = desc; 622 - gdp->plane.ops = &gdp_plane_ops; 586 + gdp->plane.status = STI_PLANE_DISABLED; 623 587 624 588 gdp->vtg_field_nb.notifier_call = sti_gdp_field_cb; 625 589 626 590 sti_gdp_init(gdp); 627 591 628 - return &gdp->plane; 592 + res = drm_universal_plane_init(drm_dev, &gdp->plane.drm_plane, 593 + possible_crtcs, 594 + &sti_plane_helpers_funcs, 595 + gdp_supported_formats, 596 + ARRAY_SIZE(gdp_supported_formats), 597 + type); 598 + if (res) { 599 + DRM_ERROR("Failed to initialize universal plane\n"); 600 + goto err; 601 + } 602 + 603 + drm_plane_helper_add(&gdp->plane.drm_plane, &sti_gdp_helpers_funcs); 604 + 605 + sti_plane_init_property(&gdp->plane, type); 606 + 607 + return &gdp->plane.drm_plane; 608 + 609 + err: 610 + devm_kfree(dev, gdp); 611 + return NULL; 629 612 }
+5 -3
drivers/gpu/drm/sti/sti_gdp.h
··· 11 11 12 12 #include <linux/types.h> 13 13 14 - struct sti_plane *sti_gdp_create(struct device *dev, int desc, 15 - void __iomem *baseaddr); 16 - 14 + struct drm_plane *sti_gdp_create(struct drm_device *drm_dev, 15 + struct device *dev, int desc, 16 + void __iomem *baseaddr, 17 + unsigned int possible_crtcs, 18 + enum drm_plane_type type); 17 19 #endif
+230 -199
drivers/gpu/drm/sti/sti_hqvdp.c
··· 12 12 #include <linux/reset.h> 13 13 14 14 #include <drm/drmP.h> 15 + #include <drm/drm_fb_cma_helper.h> 16 + #include <drm/drm_gem_cma_helper.h> 15 17 18 + #include "sti_compositor.h" 16 19 #include "sti_hqvdp_lut.h" 17 20 #include "sti_plane.h" 18 21 #include "sti_vtg.h" ··· 360 357 DRM_FORMAT_NV12, 361 358 }; 362 359 363 - static const uint32_t *sti_hqvdp_get_formats(struct sti_plane *plane) 364 - { 365 - return hqvdp_supported_formats; 366 - } 367 - 368 - static unsigned int sti_hqvdp_get_nb_formats(struct sti_plane *plane) 369 - { 370 - return ARRAY_SIZE(hqvdp_supported_formats); 371 - } 372 - 373 360 /** 374 361 * sti_hqvdp_get_free_cmd 375 362 * @hqvdp: hqvdp structure ··· 475 482 476 483 /** 477 484 * sti_hqvdp_check_hw_scaling 478 - * @plane: hqvdp plane 485 + * @hqvdp: hqvdp pointer 486 + * @mode: display mode with timing constraints 487 + * @src_w: source width 488 + * @src_h: source height 489 + * @dst_w: destination width 490 + * @dst_h: destination height 479 491 * 480 492 * Check if the HW is able to perform the scaling request 481 493 * The firmware scaling limitation is "CEIL(1/Zy) <= FLOOR(LFW)" where: ··· 494 496 * RETURNS: 495 497 * True if the HW can scale. 496 498 */ 497 - static bool sti_hqvdp_check_hw_scaling(struct sti_plane *plane) 499 + static bool sti_hqvdp_check_hw_scaling(struct sti_hqvdp *hqvdp, 500 + struct drm_display_mode *mode, 501 + int src_w, int src_h, 502 + int dst_w, int dst_h) 498 503 { 499 - struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane); 500 504 unsigned long lfw; 501 505 unsigned int inv_zy; 502 506 503 - lfw = plane->mode->htotal * (clk_get_rate(hqvdp->clk) / 1000000); 504 - lfw /= max(plane->src_w, plane->dst_w) * plane->mode->clock / 1000; 507 + lfw = mode->htotal * (clk_get_rate(hqvdp->clk) / 1000000); 508 + lfw /= max(src_w, dst_w) * mode->clock / 1000; 505 509 506 - inv_zy = DIV_ROUND_UP(plane->src_h, plane->dst_h); 510 + inv_zy = DIV_ROUND_UP(src_h, dst_h); 507 511 508 512 return (inv_zy <= lfw) ? 
true : false; 509 513 } 510 514 511 515 /** 512 - * sti_hqvdp_prepare 513 - * @plane: hqvdp plane 514 - * @first_prepare: true if it is the first time this function is called 515 - * 516 - * Prepares a command for the firmware 517 - * 518 - * RETURNS: 519 - * 0 on success. 520 - */ 521 - static int sti_hqvdp_prepare(struct sti_plane *plane, bool first_prepare) 522 - { 523 - struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane); 524 - struct sti_hqvdp_cmd *cmd; 525 - int scale_h, scale_v; 526 - int cmd_offset; 527 - 528 - dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_plane_to_str(plane)); 529 - 530 - cmd_offset = sti_hqvdp_get_free_cmd(hqvdp); 531 - if (cmd_offset == -1) { 532 - DRM_ERROR("No available hqvdp_cmd now\n"); 533 - return -EBUSY; 534 - } 535 - cmd = hqvdp->hqvdp_cmd + cmd_offset; 536 - 537 - if (!sti_hqvdp_check_hw_scaling(plane)) { 538 - DRM_ERROR("Scaling beyond HW capabilities\n"); 539 - return -EINVAL; 540 - } 541 - 542 - /* Static parameters, defaulting to progressive mode */ 543 - cmd->top.config = TOP_CONFIG_PROGRESSIVE; 544 - cmd->top.mem_format = TOP_MEM_FORMAT_DFLT; 545 - cmd->hvsrc.param_ctrl = HVSRC_PARAM_CTRL_DFLT; 546 - cmd->csdi.config = CSDI_CONFIG_PROG; 547 - 548 - /* VC1RE, FMD bypassed : keep everything set to 0 549 - * IQI/P2I bypassed */ 550 - cmd->iqi.config = IQI_CONFIG_DFLT; 551 - cmd->iqi.con_bri = IQI_CON_BRI_DFLT; 552 - cmd->iqi.sat_gain = IQI_SAT_GAIN_DFLT; 553 - cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT; 554 - 555 - /* Buffer planes address */ 556 - cmd->top.current_luma = (u32)plane->paddr + plane->offsets[0]; 557 - cmd->top.current_chroma = (u32)plane->paddr + plane->offsets[1]; 558 - 559 - /* Pitches */ 560 - cmd->top.luma_processed_pitch = cmd->top.luma_src_pitch = 561 - plane->pitches[0]; 562 - cmd->top.chroma_processed_pitch = cmd->top.chroma_src_pitch = 563 - plane->pitches[1]; 564 - 565 - /* Input / output size 566 - * Align to upper even value */ 567 - plane->dst_w = ALIGN(plane->dst_w, 2); 568 - plane->dst_h = 
ALIGN(plane->dst_h, 2); 569 - 570 - if ((plane->src_w > MAX_WIDTH) || (plane->src_w < MIN_WIDTH) || 571 - (plane->src_h > MAX_HEIGHT) || (plane->src_h < MIN_HEIGHT) || 572 - (plane->dst_w > MAX_WIDTH) || (plane->dst_w < MIN_WIDTH) || 573 - (plane->dst_h > MAX_HEIGHT) || (plane->dst_h < MIN_HEIGHT)) { 574 - DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n", 575 - plane->src_w, plane->src_h, 576 - plane->dst_w, plane->dst_h); 577 - return -EINVAL; 578 - } 579 - cmd->top.input_viewport_size = cmd->top.input_frame_size = 580 - plane->src_h << 16 | plane->src_w; 581 - cmd->hvsrc.output_picture_size = plane->dst_h << 16 | plane->dst_w; 582 - cmd->top.input_viewport_ori = plane->src_y << 16 | plane->src_x; 583 - 584 - /* Handle interlaced */ 585 - if (plane->fb->flags & DRM_MODE_FB_INTERLACED) { 586 - /* Top field to display */ 587 - cmd->top.config = TOP_CONFIG_INTER_TOP; 588 - 589 - /* Update pitches and vert size */ 590 - cmd->top.input_frame_size = (plane->src_h / 2) << 16 | 591 - plane->src_w; 592 - cmd->top.luma_processed_pitch *= 2; 593 - cmd->top.luma_src_pitch *= 2; 594 - cmd->top.chroma_processed_pitch *= 2; 595 - cmd->top.chroma_src_pitch *= 2; 596 - 597 - /* Enable directional deinterlacing processing */ 598 - cmd->csdi.config = CSDI_CONFIG_INTER_DIR; 599 - cmd->csdi.config2 = CSDI_CONFIG2_DFLT; 600 - cmd->csdi.dcdi_config = CSDI_DCDI_CONFIG_DFLT; 601 - } 602 - 603 - /* Update hvsrc lut coef */ 604 - scale_h = SCALE_FACTOR * plane->dst_w / plane->src_w; 605 - sti_hqvdp_update_hvsrc(HVSRC_HORI, scale_h, &cmd->hvsrc); 606 - 607 - scale_v = SCALE_FACTOR * plane->dst_h / plane->src_h; 608 - sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc); 609 - 610 - if (first_prepare) { 611 - /* Prevent VTG shutdown */ 612 - if (clk_prepare_enable(hqvdp->clk_pix_main)) { 613 - DRM_ERROR("Failed to prepare/enable pix main clk\n"); 614 - return -ENXIO; 615 - } 616 - 617 - /* Register VTG Vsync callback to handle bottom fields */ 618 - if ((plane->fb->flags & 
DRM_MODE_FB_INTERLACED) && 619 - sti_vtg_register_client(hqvdp->vtg, &hqvdp->vtg_nb, 620 - plane->mixer_id)) { 621 - DRM_ERROR("Cannot register VTG notifier\n"); 622 - return -ENXIO; 623 - } 624 - } 625 - 626 - return 0; 627 - } 628 - 629 - /** 630 - * sti_hqvdp_commit 631 - * @plane: hqvdp plane 632 - * 633 - * Enables the HQVDP plane 634 - * 635 - * RETURNS: 636 - * 0 on success. 637 - */ 638 - static int sti_hqvdp_commit(struct sti_plane *plane) 639 - { 640 - struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane); 641 - int cmd_offset; 642 - 643 - dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_plane_to_str(plane)); 644 - 645 - cmd_offset = sti_hqvdp_get_free_cmd(hqvdp); 646 - if (cmd_offset == -1) { 647 - DRM_ERROR("No available hqvdp_cmd now\n"); 648 - return -EBUSY; 649 - } 650 - 651 - writel(hqvdp->hqvdp_cmd_paddr + cmd_offset, 652 - hqvdp->regs + HQVDP_MBX_NEXT_CMD); 653 - 654 - hqvdp->curr_field_count++; 655 - 656 - /* Interlaced : get ready to display the bottom field at next Vsync */ 657 - if (plane->fb->flags & DRM_MODE_FB_INTERLACED) 658 - hqvdp->btm_field_pending = true; 659 - 660 - dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n", 661 - __func__, hqvdp->hqvdp_cmd_paddr + cmd_offset); 662 - 663 - return 0; 664 - } 665 - 666 - /** 667 516 * sti_hqvdp_disable 668 - * @plane: hqvdp plane 517 + * @hqvdp: hqvdp pointer 669 518 * 670 519 * Disables the HQVDP plane 671 - * 672 - * RETURNS: 673 - * 0 on success. 
674 520 */ 675 - static int sti_hqvdp_disable(struct sti_plane *plane) 521 + static void sti_hqvdp_disable(struct sti_hqvdp *hqvdp) 676 522 { 677 - struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane); 678 523 int i; 679 524 680 - DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(plane)); 525 + DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(&hqvdp->plane)); 681 526 682 527 /* Unregister VTG Vsync callback */ 683 - if ((plane->fb->flags & DRM_MODE_FB_INTERLACED) && 684 - sti_vtg_unregister_client(hqvdp->vtg, &hqvdp->vtg_nb)) 528 + if (sti_vtg_unregister_client(hqvdp->vtg, &hqvdp->vtg_nb)) 685 529 DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n"); 686 530 687 531 /* Set next cmd to NULL */ ··· 539 699 /* VTG can stop now */ 540 700 clk_disable_unprepare(hqvdp->clk_pix_main); 541 701 542 - if (i == POLL_MAX_ATTEMPT) { 702 + if (i == POLL_MAX_ATTEMPT) 543 703 DRM_ERROR("XP70 could not revert to idle\n"); 544 - return -ENXIO; 545 - } 546 704 547 - return 0; 705 + hqvdp->plane.status = STI_PLANE_DISABLED; 548 706 } 549 707 550 708 /** ··· 565 727 if ((evt != VTG_TOP_FIELD_EVENT) && (evt != VTG_BOTTOM_FIELD_EVENT)) { 566 728 DRM_DEBUG_DRIVER("Unknown event\n"); 567 729 return 0; 730 + } 731 + 732 + if (hqvdp->plane.status == STI_PLANE_FLUSHING) { 733 + /* disable need to be synchronize on vsync event */ 734 + DRM_DEBUG_DRIVER("Vsync event received => disable %s\n", 735 + sti_plane_to_str(&hqvdp->plane)); 736 + 737 + sti_hqvdp_disable(hqvdp); 568 738 } 569 739 570 740 if (hqvdp->btm_field_pending) { ··· 628 782 memset(hqvdp->hqvdp_cmd, 0, size); 629 783 } 630 784 631 - static const struct sti_plane_funcs hqvdp_plane_ops = { 632 - .get_formats = sti_hqvdp_get_formats, 633 - .get_nb_formats = sti_hqvdp_get_nb_formats, 634 - .prepare = sti_hqvdp_prepare, 635 - .commit = sti_hqvdp_commit, 636 - .disable = sti_hqvdp_disable, 785 + static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane, 786 + struct drm_plane_state *oldstate) 787 + { 788 + struct drm_plane_state *state = 
drm_plane->state; 789 + struct sti_plane *plane = to_sti_plane(drm_plane); 790 + struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane); 791 + struct drm_crtc *crtc = state->crtc; 792 + struct sti_mixer *mixer = to_sti_mixer(crtc); 793 + struct drm_framebuffer *fb = state->fb; 794 + struct drm_display_mode *mode = &crtc->mode; 795 + int dst_x = state->crtc_x; 796 + int dst_y = state->crtc_y; 797 + int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x); 798 + int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y); 799 + /* src_x are in 16.16 format */ 800 + int src_x = state->src_x >> 16; 801 + int src_y = state->src_y >> 16; 802 + int src_w = state->src_w >> 16; 803 + int src_h = state->src_h >> 16; 804 + bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false; 805 + struct drm_gem_cma_object *cma_obj; 806 + struct sti_hqvdp_cmd *cmd; 807 + int scale_h, scale_v; 808 + int cmd_offset; 809 + 810 + DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n", 811 + crtc->base.id, sti_mixer_to_str(mixer), 812 + drm_plane->base.id, sti_plane_to_str(plane)); 813 + DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n", 814 + sti_plane_to_str(plane), 815 + dst_w, dst_h, dst_x, dst_y, 816 + src_w, src_h, src_x, src_y); 817 + 818 + cmd_offset = sti_hqvdp_get_free_cmd(hqvdp); 819 + if (cmd_offset == -1) { 820 + DRM_ERROR("No available hqvdp_cmd now\n"); 821 + return; 822 + } 823 + cmd = hqvdp->hqvdp_cmd + cmd_offset; 824 + 825 + if (!sti_hqvdp_check_hw_scaling(hqvdp, mode, 826 + src_w, src_h, 827 + dst_w, dst_h)) { 828 + DRM_ERROR("Scaling beyond HW capabilities\n"); 829 + return; 830 + } 831 + 832 + /* Static parameters, defaulting to progressive mode */ 833 + cmd->top.config = TOP_CONFIG_PROGRESSIVE; 834 + cmd->top.mem_format = TOP_MEM_FORMAT_DFLT; 835 + cmd->hvsrc.param_ctrl = HVSRC_PARAM_CTRL_DFLT; 836 + cmd->csdi.config = CSDI_CONFIG_PROG; 837 + 838 + /* VC1RE, FMD bypassed : keep everything set to 0 839 + * IQI/P2I bypassed */ 840 + 
cmd->iqi.config = IQI_CONFIG_DFLT; 841 + cmd->iqi.con_bri = IQI_CON_BRI_DFLT; 842 + cmd->iqi.sat_gain = IQI_SAT_GAIN_DFLT; 843 + cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT; 844 + 845 + cma_obj = drm_fb_cma_get_gem_obj(fb, 0); 846 + if (!cma_obj) { 847 + DRM_ERROR("Can't get CMA GEM object for fb\n"); 848 + return; 849 + } 850 + 851 + DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id, 852 + (char *)&fb->pixel_format, 853 + (unsigned long)cma_obj->paddr); 854 + 855 + /* Buffer planes address */ 856 + cmd->top.current_luma = (u32)cma_obj->paddr + fb->offsets[0]; 857 + cmd->top.current_chroma = (u32)cma_obj->paddr + fb->offsets[1]; 858 + 859 + /* Pitches */ 860 + cmd->top.luma_processed_pitch = fb->pitches[0]; 861 + cmd->top.luma_src_pitch = fb->pitches[0]; 862 + cmd->top.chroma_processed_pitch = fb->pitches[1]; 863 + cmd->top.chroma_src_pitch = fb->pitches[1]; 864 + 865 + /* Input / output size 866 + * Align to upper even value */ 867 + dst_w = ALIGN(dst_w, 2); 868 + dst_h = ALIGN(dst_h, 2); 869 + 870 + if ((src_w > MAX_WIDTH) || (src_w < MIN_WIDTH) || 871 + (src_h > MAX_HEIGHT) || (src_h < MIN_HEIGHT) || 872 + (dst_w > MAX_WIDTH) || (dst_w < MIN_WIDTH) || 873 + (dst_h > MAX_HEIGHT) || (dst_h < MIN_HEIGHT)) { 874 + DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n", 875 + src_w, src_h, 876 + dst_w, dst_h); 877 + return; 878 + } 879 + 880 + cmd->top.input_viewport_size = src_h << 16 | src_w; 881 + cmd->top.input_frame_size = src_h << 16 | src_w; 882 + cmd->hvsrc.output_picture_size = dst_h << 16 | dst_w; 883 + cmd->top.input_viewport_ori = src_y << 16 | src_x; 884 + 885 + /* Handle interlaced */ 886 + if (fb->flags & DRM_MODE_FB_INTERLACED) { 887 + /* Top field to display */ 888 + cmd->top.config = TOP_CONFIG_INTER_TOP; 889 + 890 + /* Update pitches and vert size */ 891 + cmd->top.input_frame_size = (src_h / 2) << 16 | src_w; 892 + cmd->top.luma_processed_pitch *= 2; 893 + cmd->top.luma_src_pitch *= 2; 894 + cmd->top.chroma_processed_pitch *= 2; 895 + 
cmd->top.chroma_src_pitch *= 2; 896 + 897 + /* Enable directional deinterlacing processing */ 898 + cmd->csdi.config = CSDI_CONFIG_INTER_DIR; 899 + cmd->csdi.config2 = CSDI_CONFIG2_DFLT; 900 + cmd->csdi.dcdi_config = CSDI_DCDI_CONFIG_DFLT; 901 + } 902 + 903 + /* Update hvsrc lut coef */ 904 + scale_h = SCALE_FACTOR * dst_w / src_w; 905 + sti_hqvdp_update_hvsrc(HVSRC_HORI, scale_h, &cmd->hvsrc); 906 + 907 + scale_v = SCALE_FACTOR * dst_h / src_h; 908 + sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc); 909 + 910 + if (first_prepare) { 911 + /* Prevent VTG shutdown */ 912 + if (clk_prepare_enable(hqvdp->clk_pix_main)) { 913 + DRM_ERROR("Failed to prepare/enable pix main clk\n"); 914 + return; 915 + } 916 + 917 + /* Register VTG Vsync callback to handle bottom fields */ 918 + if (sti_vtg_register_client(hqvdp->vtg, 919 + &hqvdp->vtg_nb, 920 + mixer->id)) { 921 + DRM_ERROR("Cannot register VTG notifier\n"); 922 + return; 923 + } 924 + } 925 + 926 + writel(hqvdp->hqvdp_cmd_paddr + cmd_offset, 927 + hqvdp->regs + HQVDP_MBX_NEXT_CMD); 928 + 929 + hqvdp->curr_field_count++; 930 + 931 + /* Interlaced : get ready to display the bottom field at next Vsync */ 932 + if (fb->flags & DRM_MODE_FB_INTERLACED) 933 + hqvdp->btm_field_pending = true; 934 + 935 + dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n", 936 + __func__, hqvdp->hqvdp_cmd_paddr + cmd_offset); 937 + 938 + plane->status = STI_PLANE_UPDATED; 939 + } 940 + 941 + static void sti_hqvdp_atomic_disable(struct drm_plane *drm_plane, 942 + struct drm_plane_state *oldstate) 943 + { 944 + struct sti_plane *plane = to_sti_plane(drm_plane); 945 + struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc); 946 + 947 + if (!drm_plane->crtc) { 948 + DRM_DEBUG_DRIVER("drm plane:%d not enabled\n", 949 + drm_plane->base.id); 950 + return; 951 + } 952 + 953 + DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n", 954 + drm_plane->crtc->base.id, sti_mixer_to_str(mixer), 955 + drm_plane->base.id, sti_plane_to_str(plane)); 956 + 957 + 
plane->status = STI_PLANE_DISABLING; 958 + } 959 + 960 + static const struct drm_plane_helper_funcs sti_hqvdp_helpers_funcs = { 961 + .atomic_update = sti_hqvdp_atomic_update, 962 + .atomic_disable = sti_hqvdp_atomic_disable, 637 963 }; 638 964 639 - struct sti_plane *sti_hqvdp_create(struct device *dev, int desc) 965 + static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev, 966 + struct device *dev, int desc) 640 967 { 641 968 struct sti_hqvdp *hqvdp = dev_get_drvdata(dev); 969 + int res; 642 970 643 971 hqvdp->plane.desc = desc; 644 - hqvdp->plane.ops = &hqvdp_plane_ops; 972 + hqvdp->plane.status = STI_PLANE_DISABLED; 645 973 646 974 sti_hqvdp_init(hqvdp); 647 975 648 - return &hqvdp->plane; 976 + res = drm_universal_plane_init(drm_dev, &hqvdp->plane.drm_plane, 1, 977 + &sti_plane_helpers_funcs, 978 + hqvdp_supported_formats, 979 + ARRAY_SIZE(hqvdp_supported_formats), 980 + DRM_PLANE_TYPE_OVERLAY); 981 + if (res) { 982 + DRM_ERROR("Failed to initialize universal plane\n"); 983 + return NULL; 984 + } 985 + 986 + drm_plane_helper_add(&hqvdp->plane.drm_plane, &sti_hqvdp_helpers_funcs); 987 + 988 + sti_plane_init_property(&hqvdp->plane, DRM_PLANE_TYPE_OVERLAY); 989 + 990 + return &hqvdp->plane.drm_plane; 649 991 } 650 992 651 993 static void sti_hqvdp_init_plugs(struct sti_hqvdp *hqvdp) ··· 982 948 { 983 949 struct sti_hqvdp *hqvdp = dev_get_drvdata(dev); 984 950 struct drm_device *drm_dev = data; 985 - struct sti_plane *plane; 951 + struct drm_plane *plane; 986 952 int err; 987 953 988 954 DRM_DEBUG_DRIVER("\n"); ··· 999 965 } 1000 966 1001 967 /* Create HQVDP plane once xp70 is initialized */ 1002 - plane = sti_hqvdp_create(hqvdp->dev, STI_HQVDP_0); 1003 - if (plane) 1004 - sti_plane_init(hqvdp->drm_dev, plane, 1, 1005 - DRM_PLANE_TYPE_OVERLAY); 1006 - else 968 + plane = sti_hqvdp_create(drm_dev, hqvdp->dev, STI_HQVDP_0); 969 + if (!plane) 1007 970 DRM_ERROR("Can't create HQVDP plane\n"); 1008 971 1009 972 return 0;
+1 -9
drivers/gpu/drm/sti/sti_mixer.c
··· 58 58 return "<UNKNOWN MIXER>"; 59 59 } 60 60 } 61 + EXPORT_SYMBOL(sti_mixer_to_str); 61 62 62 63 static inline u32 sti_mixer_reg_read(struct sti_mixer *mixer, u32 reg_id) 63 64 { ··· 224 223 sti_mixer_reg_write(mixer, GAM_MIXER_CTL, val); 225 224 226 225 return 0; 227 - } 228 - 229 - void sti_mixer_clear_all_planes(struct sti_mixer *mixer) 230 - { 231 - u32 val; 232 - 233 - DRM_DEBUG_DRIVER("%s clear all planes\n", sti_mixer_to_str(mixer)); 234 - val = sti_mixer_reg_read(mixer, GAM_MIXER_CTL) & 0xFFFF0000; 235 - sti_mixer_reg_write(mixer, GAM_MIXER_CTL, val); 236 226 } 237 227 238 228 void sti_mixer_set_matrix(struct sti_mixer *mixer)
+8 -3
drivers/gpu/drm/sti/sti_mixer.h
··· 15 15 16 16 #define to_sti_mixer(x) container_of(x, struct sti_mixer, drm_crtc) 17 17 18 + enum sti_mixer_status { 19 + STI_MIXER_READY, 20 + STI_MIXER_DISABLING, 21 + STI_MIXER_DISABLED, 22 + }; 23 + 18 24 /** 19 25 * STI Mixer subdevice structure 20 26 * ··· 29 23 * @id: id of the mixer 30 24 * @drm_crtc: crtc object link to the mixer 31 25 * @pending_event: set if a flip event is pending on crtc 32 - * @enabled: to know if the mixer is active or not 26 + * @status: to know the status of the mixer 33 27 */ 34 28 struct sti_mixer { 35 29 struct device *dev; ··· 37 31 int id; 38 32 struct drm_crtc drm_crtc; 39 33 struct drm_pending_vblank_event *pending_event; 40 - bool enabled; 34 + enum sti_mixer_status status; 41 35 }; 42 36 43 37 const char *sti_mixer_to_str(struct sti_mixer *mixer); ··· 47 41 48 42 int sti_mixer_set_plane_status(struct sti_mixer *mixer, 49 43 struct sti_plane *plane, bool status); 50 - void sti_mixer_clear_all_planes(struct sti_mixer *mixer); 51 44 int sti_mixer_set_plane_depth(struct sti_mixer *mixer, struct sti_plane *plane); 52 45 int sti_mixer_active_video_area(struct sti_mixer *mixer, 53 46 struct drm_display_mode *mode);
+16 -237
drivers/gpu/drm/sti/sti_plane.c
··· 7 7 */ 8 8 9 9 #include <drm/drmP.h> 10 - #include <drm/drm_atomic_helper.h> 11 - #include <drm/drm_gem_cma_helper.h> 12 10 #include <drm/drm_fb_cma_helper.h> 13 - #include <drm/drm_plane_helper.h> 11 + #include <drm/drm_gem_cma_helper.h> 14 12 15 13 #include "sti_compositor.h" 16 14 #include "sti_drv.h" 17 15 #include "sti_plane.h" 18 - #include "sti_vtg.h" 19 16 20 17 /* (Background) < GDP0 < GDP1 < HQVDP0 < GDP2 < GDP3 < (ForeGround) */ 21 18 enum sti_plane_desc sti_plane_default_zorder[] = { ··· 44 47 } 45 48 EXPORT_SYMBOL(sti_plane_to_str); 46 49 47 - static int sti_plane_prepare(struct sti_plane *plane, 48 - struct drm_crtc *crtc, 49 - struct drm_framebuffer *fb, 50 - struct drm_display_mode *mode, int mixer_id, 51 - int dest_x, int dest_y, int dest_w, int dest_h, 52 - int src_x, int src_y, int src_w, int src_h) 53 - { 54 - struct drm_gem_cma_object *cma_obj; 55 - unsigned int i; 56 - int res; 57 - 58 - if (!plane || !fb || !mode) { 59 - DRM_ERROR("Null fb, plane or mode\n"); 60 - return 1; 61 - } 62 - 63 - cma_obj = drm_fb_cma_get_gem_obj(fb, 0); 64 - if (!cma_obj) { 65 - DRM_ERROR("Can't get CMA GEM object for fb\n"); 66 - return 1; 67 - } 68 - 69 - plane->fb = fb; 70 - plane->mode = mode; 71 - plane->mixer_id = mixer_id; 72 - plane->dst_x = dest_x; 73 - plane->dst_y = dest_y; 74 - plane->dst_w = clamp_val(dest_w, 0, mode->crtc_hdisplay - dest_x); 75 - plane->dst_h = clamp_val(dest_h, 0, mode->crtc_vdisplay - dest_y); 76 - plane->src_x = src_x; 77 - plane->src_y = src_y; 78 - plane->src_w = src_w; 79 - plane->src_h = src_h; 80 - plane->format = fb->pixel_format; 81 - plane->vaddr = cma_obj->vaddr; 82 - plane->paddr = cma_obj->paddr; 83 - for (i = 0; i < 4; i++) { 84 - plane->pitches[i] = fb->pitches[i]; 85 - plane->offsets[i] = fb->offsets[i]; 86 - } 87 - 88 - DRM_DEBUG_DRIVER("%s is associated with mixer_id %d\n", 89 - sti_plane_to_str(plane), 90 - plane->mixer_id); 91 - DRM_DEBUG_DRIVER("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n", 92 - 
sti_plane_to_str(plane), 93 - plane->dst_w, plane->dst_h, plane->dst_x, plane->dst_y, 94 - plane->src_w, plane->src_h, plane->src_x, 95 - plane->src_y); 96 - 97 - DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id, 98 - (char *)&plane->format, (unsigned long)plane->paddr); 99 - 100 - if (!plane->ops->prepare) { 101 - DRM_ERROR("Cannot prepare\n"); 102 - return 1; 103 - } 104 - 105 - res = plane->ops->prepare(plane, !plane->enabled); 106 - if (res) { 107 - DRM_ERROR("Plane prepare failed\n"); 108 - return res; 109 - } 110 - 111 - plane->enabled = true; 112 - 113 - return 0; 114 - } 115 - 116 - static int sti_plane_commit(struct sti_plane *plane) 117 - { 118 - if (!plane) 119 - return 1; 120 - 121 - if (!plane->ops->commit) { 122 - DRM_ERROR("Cannot commit\n"); 123 - return 1; 124 - } 125 - 126 - return plane->ops->commit(plane); 127 - } 128 - 129 - static int sti_plane_disable(struct sti_plane *plane) 130 - { 131 - int res; 132 - 133 - DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(plane)); 134 - if (!plane) 135 - return 1; 136 - 137 - if (!plane->enabled) 138 - return 0; 139 - 140 - if (!plane->ops->disable) { 141 - DRM_ERROR("Cannot disable\n"); 142 - return 1; 143 - } 144 - 145 - res = plane->ops->disable(plane); 146 - if (res) { 147 - DRM_ERROR("Plane disable failed\n"); 148 - return res; 149 - } 150 - 151 - plane->enabled = false; 152 - 153 - return 0; 154 - } 155 - 156 50 static void sti_plane_destroy(struct drm_plane *drm_plane) 157 51 { 158 52 DRM_DEBUG_DRIVER("\n"); ··· 70 182 return -EINVAL; 71 183 } 72 184 73 - static struct drm_plane_funcs sti_plane_funcs = { 74 - .update_plane = drm_atomic_helper_update_plane, 75 - .disable_plane = drm_atomic_helper_disable_plane, 76 - .destroy = sti_plane_destroy, 77 - .set_property = sti_plane_set_property, 78 - .reset = drm_atomic_helper_plane_reset, 79 - .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, 80 - .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, 81 - }; 82 - 83 - 
static int sti_plane_atomic_check(struct drm_plane *drm_plane, 84 - struct drm_plane_state *state) 85 - { 86 - return 0; 87 - } 88 - 89 - static void sti_plane_atomic_update(struct drm_plane *drm_plane, 90 - struct drm_plane_state *oldstate) 91 - { 92 - struct drm_plane_state *state = drm_plane->state; 93 - struct sti_plane *plane = to_sti_plane(drm_plane); 94 - struct sti_mixer *mixer = to_sti_mixer(state->crtc); 95 - int res; 96 - 97 - DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n", 98 - state->crtc->base.id, sti_mixer_to_str(mixer), 99 - drm_plane->base.id, sti_plane_to_str(plane)); 100 - DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", 101 - state->crtc_w, state->crtc_h, 102 - state->crtc_x, state->crtc_y); 103 - 104 - res = sti_mixer_set_plane_depth(mixer, plane); 105 - if (res) { 106 - DRM_ERROR("Cannot set plane depth\n"); 107 - return; 108 - } 109 - 110 - /* src_x are in 16.16 format */ 111 - res = sti_plane_prepare(plane, state->crtc, state->fb, 112 - &state->crtc->mode, mixer->id, 113 - state->crtc_x, state->crtc_y, 114 - state->crtc_w, state->crtc_h, 115 - state->src_x >> 16, state->src_y >> 16, 116 - state->src_w >> 16, state->src_h >> 16); 117 - if (res) { 118 - DRM_ERROR("Plane prepare failed\n"); 119 - return; 120 - } 121 - 122 - res = sti_plane_commit(plane); 123 - if (res) { 124 - DRM_ERROR("Plane commit failed\n"); 125 - return; 126 - } 127 - 128 - res = sti_mixer_set_plane_status(mixer, plane, true); 129 - if (res) { 130 - DRM_ERROR("Cannot enable plane at mixer\n"); 131 - return; 132 - } 133 - } 134 - 135 - static void sti_plane_atomic_disable(struct drm_plane *drm_plane, 136 - struct drm_plane_state *oldstate) 137 - { 138 - struct sti_plane *plane = to_sti_plane(drm_plane); 139 - struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc); 140 - int res; 141 - 142 - if (!drm_plane->crtc) { 143 - DRM_DEBUG_DRIVER("drm plane:%d not enabled\n", 144 - drm_plane->base.id); 145 - return; 146 - } 147 - 148 - DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n", 149 - 
drm_plane->crtc->base.id, sti_mixer_to_str(mixer), 150 - drm_plane->base.id, sti_plane_to_str(plane)); 151 - 152 - /* Disable plane at mixer level */ 153 - res = sti_mixer_set_plane_status(mixer, plane, false); 154 - if (res) { 155 - DRM_ERROR("Cannot disable plane at mixer\n"); 156 - return; 157 - } 158 - 159 - /* Wait a while to be sure that a Vsync event is received */ 160 - msleep(WAIT_NEXT_VSYNC_MS); 161 - 162 - /* Then disable plane itself */ 163 - res = sti_plane_disable(plane); 164 - if (res) { 165 - DRM_ERROR("Plane disable failed\n"); 166 - return; 167 - } 168 - } 169 - 170 - static const struct drm_plane_helper_funcs sti_plane_helpers_funcs = { 171 - .atomic_check = sti_plane_atomic_check, 172 - .atomic_update = sti_plane_atomic_update, 173 - .atomic_disable = sti_plane_atomic_disable, 174 - }; 175 - 176 185 static void sti_plane_attach_zorder_property(struct drm_plane *drm_plane) 177 186 { 178 187 struct drm_device *dev = drm_plane->dev; ··· 90 305 drm_object_attach_property(&drm_plane->base, prop, plane->zorder); 91 306 } 92 307 93 - struct drm_plane *sti_plane_init(struct drm_device *dev, 94 - struct sti_plane *plane, 95 - unsigned int possible_crtcs, 96 - enum drm_plane_type type) 308 + void sti_plane_init_property(struct sti_plane *plane, 309 + enum drm_plane_type type) 97 310 { 98 - int err, i; 99 - 100 - err = drm_universal_plane_init(dev, &plane->drm_plane, 101 - possible_crtcs, 102 - &sti_plane_funcs, 103 - plane->ops->get_formats(plane), 104 - plane->ops->get_nb_formats(plane), 105 - type); 106 - if (err) { 107 - DRM_ERROR("Failed to initialize universal plane\n"); 108 - return NULL; 109 - } 110 - 111 - drm_plane_helper_add(&plane->drm_plane, &sti_plane_helpers_funcs); 311 + unsigned int i; 112 312 113 313 for (i = 0; i < ARRAY_SIZE(sti_plane_default_zorder); i++) 114 314 if (sti_plane_default_zorder[i] == plane->desc) ··· 107 337 DRM_DEBUG_DRIVER("drm plane:%d mapped to %s with zorder:%d\n", 108 338 plane->drm_plane.base.id, 109 339 
sti_plane_to_str(plane), plane->zorder); 110 - 111 - return &plane->drm_plane; 112 340 } 113 - EXPORT_SYMBOL(sti_plane_init); 341 + EXPORT_SYMBOL(sti_plane_init_property); 342 + 343 + struct drm_plane_funcs sti_plane_helpers_funcs = { 344 + .update_plane = drm_atomic_helper_update_plane, 345 + .disable_plane = drm_atomic_helper_disable_plane, 346 + .destroy = sti_plane_destroy, 347 + .set_property = sti_plane_set_property, 348 + .reset = drm_atomic_helper_plane_reset, 349 + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, 350 + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, 351 + }; 352 + EXPORT_SYMBOL(sti_plane_helpers_funcs);
+16 -50
drivers/gpu/drm/sti/sti_plane.h
··· 8 8 #define _STI_PLANE_H_ 9 9 10 10 #include <drm/drmP.h> 11 + #include <drm/drm_atomic_helper.h> 12 + #include <drm/drm_plane_helper.h> 13 + 14 + extern struct drm_plane_funcs sti_plane_helpers_funcs; 11 15 12 16 #define to_sti_plane(x) container_of(x, struct sti_plane, drm_plane) 13 17 ··· 42 38 STI_BACK = STI_BCK 43 39 }; 44 40 41 + enum sti_plane_status { 42 + STI_PLANE_READY, 43 + STI_PLANE_UPDATED, 44 + STI_PLANE_DISABLING, 45 + STI_PLANE_FLUSHING, 46 + STI_PLANE_DISABLED, 47 + }; 48 + 45 49 /** 46 50 * STI plane structure 47 51 * 48 52 * @plane: drm plane it is bound to (if any) 49 - * @fb: drm fb it is bound to 50 - * @mode: display mode 51 53 * @desc: plane type & id 52 - * @ops: plane functions 54 + * @status: to know the status of the plane 53 55 * @zorder: plane z-order 54 - * @mixer_id: id of the mixer used to display the plane 55 - * @enabled: to know if the plane is active or not 56 - * @src_x src_y: coordinates of the input (fb) area 57 - * @src_w src_h: size of the input (fb) area 58 - * @dst_x dst_y: coordinates of the output (crtc) area 59 - * @dst_w dst_h: size of the output (crtc) area 60 - * @format: format 61 - * @pitches: pitch of 'planes' (eg: Y, U, V) 62 - * @offsets: offset of 'planes' 63 - * @vaddr: virtual address of the input buffer 64 - * @paddr: physical address of the input buffer 65 56 */ 66 57 struct sti_plane { 67 58 struct drm_plane drm_plane; 68 - struct drm_framebuffer *fb; 69 - struct drm_display_mode *mode; 70 59 enum sti_plane_desc desc; 71 - const struct sti_plane_funcs *ops; 60 + enum sti_plane_status status; 72 61 int zorder; 73 - int mixer_id; 74 - bool enabled; 75 - int src_x, src_y; 76 - int src_w, src_h; 77 - int dst_x, dst_y; 78 - int dst_w, dst_h; 79 - uint32_t format; 80 - unsigned int pitches[4]; 81 - unsigned int offsets[4]; 82 - void *vaddr; 83 - dma_addr_t paddr; 84 62 }; 85 63 86 - /** 87 - * STI plane functions structure 88 - * 89 - * @get_formats: get plane supported formats 90 - * @get_nb_formats: get 
number of format supported 91 - * @prepare: prepare plane before rendering 92 - * @commit: set plane for rendering 93 - * @disable: disable plane 94 - */ 95 - struct sti_plane_funcs { 96 - const uint32_t* (*get_formats)(struct sti_plane *plane); 97 - unsigned int (*get_nb_formats)(struct sti_plane *plane); 98 - int (*prepare)(struct sti_plane *plane, bool first_prepare); 99 - int (*commit)(struct sti_plane *plane); 100 - int (*disable)(struct sti_plane *plane); 101 - }; 102 - 103 - struct drm_plane *sti_plane_init(struct drm_device *dev, 104 - struct sti_plane *sti_plane, 105 - unsigned int possible_crtcs, 106 - enum drm_plane_type type); 107 64 const char *sti_plane_to_str(struct sti_plane *plane); 108 - 65 + void sti_plane_init_property(struct sti_plane *plane, 66 + enum drm_plane_type type); 109 67 #endif
+18 -11
drivers/gpu/drm/sti/sti_vid.c
··· 43 43 #define VID_MPR2_BT709 0x07150545 44 44 #define VID_MPR3_BT709 0x00000AE8 45 45 46 - int sti_vid_commit(struct sti_vid *vid, struct sti_plane *plane) 46 + void sti_vid_commit(struct sti_vid *vid, 47 + struct drm_plane_state *state) 47 48 { 48 - struct drm_display_mode *mode = plane->mode; 49 + struct drm_crtc *crtc = state->crtc; 50 + struct drm_display_mode *mode = &crtc->mode; 51 + int dst_x = state->crtc_x; 52 + int dst_y = state->crtc_y; 53 + int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x); 54 + int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y); 49 55 u32 val, ydo, xdo, yds, xds; 56 + 57 + /* Input / output size 58 + * Align to upper even value */ 59 + dst_w = ALIGN(dst_w, 2); 60 + dst_h = ALIGN(dst_h, 2); 50 61 51 62 /* Unmask */ 52 63 val = readl(vid->regs + VID_CTL); 53 64 val &= ~VID_CTL_IGNORE; 54 65 writel(val, vid->regs + VID_CTL); 55 66 56 - ydo = sti_vtg_get_line_number(*mode, plane->dst_y); 57 - yds = sti_vtg_get_line_number(*mode, plane->dst_y + plane->dst_h - 1); 58 - xdo = sti_vtg_get_pixel_number(*mode, plane->dst_x); 59 - xds = sti_vtg_get_pixel_number(*mode, plane->dst_x + plane->dst_w - 1); 67 + ydo = sti_vtg_get_line_number(*mode, dst_y); 68 + yds = sti_vtg_get_line_number(*mode, dst_y + dst_h - 1); 69 + xdo = sti_vtg_get_pixel_number(*mode, dst_x); 70 + xds = sti_vtg_get_pixel_number(*mode, dst_x + dst_w - 1); 60 71 61 72 writel((ydo << 16) | xdo, vid->regs + VID_VPO); 62 73 writel((yds << 16) | xds, vid->regs + VID_VPS); 63 - 64 - return 0; 65 74 } 66 75 67 - int sti_vid_disable(struct sti_vid *vid) 76 + void sti_vid_disable(struct sti_vid *vid) 68 77 { 69 78 u32 val; 70 79 ··· 81 72 val = readl(vid->regs + VID_CTL); 82 73 val |= VID_CTL_IGNORE; 83 74 writel(val, vid->regs + VID_CTL); 84 - 85 - return 0; 86 75 } 87 76 88 77 static void sti_vid_init(struct sti_vid *vid)
+3 -2
drivers/gpu/drm/sti/sti_vid.h
··· 20 20 int id; 21 21 }; 22 22 23 - int sti_vid_commit(struct sti_vid *vid, struct sti_plane *plane); 24 - int sti_vid_disable(struct sti_vid *vid); 23 + void sti_vid_commit(struct sti_vid *vid, 24 + struct drm_plane_state *state); 25 + void sti_vid_disable(struct sti_vid *vid); 25 26 struct sti_vid *sti_vid_create(struct device *dev, int id, 26 27 void __iomem *baseaddr); 27 28