Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'drm-sti-next-atomic-2015-08-11' of http://git.linaro.org/people/benjamin.gaignard/kernel into drm-next

This series of patches fixes minor bugs around how driver sub-components are
bound and planes z-ordering.
The main part is about atomic support: using more atomic helpers allows us
to simplify the code (~300 lines removed) and to have a better match between
drm concepts (planes and crtc) and hardware split.

[airlied: fixed up conflict in atomic code]

* 'drm-sti-next-atomic-2015-08-11' of http://git.linaro.org/people/benjamin.gaignard/kernel:
drm/sti: atomic crtc/plane update
drm/sti: rename files and functions
drm/sti: code clean up
drm/sti: fix dynamic z-ordering
drm: sti: fix sub-components bind

+1385 -1697
+32 -34
Documentation/devicetree/bindings/gpu/st,stih4xx.txt
··· 52 52 See ../reset/reset.txt for details. 53 53 - reset-names: names of the resets listed in resets property in the same 54 54 order. 55 - - ranges: to allow probing of subdevices 56 55 57 56 - sti-hdmi: hdmi output block 58 - must be a child of sti-tvout 57 + must be a child of sti-display-subsystem 59 58 Required properties: 60 59 - compatible: "st,stih<chip>-hdmi"; 61 60 - reg: Physical base address of the IP registers and length of memory mapped region. ··· 71 72 72 73 sti-hda: 73 74 Required properties: 74 - must be a child of sti-tvout 75 + must be a child of sti-display-subsystem 75 76 - compatible: "st,stih<chip>-hda" 76 77 - reg: Physical base address of the IP registers and length of memory mapped region. 77 78 - reg-names: names of the mapped memory regions listed in regs property in ··· 84 85 85 86 sti-dvo: 86 87 Required properties: 87 - must be a child of sti-tvout 88 + must be a child of sti-display-subsystem 88 89 - compatible: "st,stih<chip>-dvo" 89 90 - reg: Physical base address of the IP registers and length of memory mapped region. 
90 91 - reg-names: names of the mapped memory regions listed in regs property in ··· 194 195 reg-names = "tvout-reg", "hda-reg", "syscfg"; 195 196 reset-names = "tvout"; 196 197 resets = <&softreset STIH416_HDTVOUT_SOFTRESET>; 197 - ranges; 198 + }; 198 199 199 - sti-hdmi@fe85c000 { 200 - compatible = "st,stih416-hdmi"; 201 - reg = <0xfe85c000 0x1000>, <0xfe830000 0x10000>; 202 - reg-names = "hdmi-reg", "syscfg"; 203 - interrupts = <GIC_SPI 173 IRQ_TYPE_NONE>; 204 - interrupt-names = "irq"; 205 - clock-names = "pix", "tmds", "phy", "audio"; 206 - clocks = <&clockgen_c_vcc CLK_S_PIX_HDMI>, <&clockgen_c_vcc CLK_S_TMDS_HDMI>, <&clockgen_c_vcc CLK_S_HDMI_REJECT_PLL>, <&clockgen_b1 CLK_S_PCM_0>; 207 - }; 200 + sti-hdmi@fe85c000 { 201 + compatible = "st,stih416-hdmi"; 202 + reg = <0xfe85c000 0x1000>, <0xfe830000 0x10000>; 203 + reg-names = "hdmi-reg", "syscfg"; 204 + interrupts = <GIC_SPI 173 IRQ_TYPE_NONE>; 205 + interrupt-names = "irq"; 206 + clock-names = "pix", "tmds", "phy", "audio"; 207 + clocks = <&clockgen_c_vcc CLK_S_PIX_HDMI>, <&clockgen_c_vcc CLK_S_TMDS_HDMI>, <&clockgen_c_vcc CLK_S_HDMI_REJECT_PLL>, <&clockgen_b1 CLK_S_PCM_0>; 208 + }; 208 209 209 - sti-hda@fe85a000 { 210 - compatible = "st,stih416-hda"; 211 - reg = <0xfe85a000 0x400>, <0xfe83085c 0x4>; 212 - reg-names = "hda-reg", "video-dacs-ctrl"; 213 - clock-names = "pix", "hddac"; 214 - clocks = <&clockgen_c_vcc CLK_S_PIX_HD>, <&clockgen_c_vcc CLK_S_HDDAC>; 215 - }; 210 + sti-hda@fe85a000 { 211 + compatible = "st,stih416-hda"; 212 + reg = <0xfe85a000 0x400>, <0xfe83085c 0x4>; 213 + reg-names = "hda-reg", "video-dacs-ctrl"; 214 + clock-names = "pix", "hddac"; 215 + clocks = <&clockgen_c_vcc CLK_S_PIX_HD>, <&clockgen_c_vcc CLK_S_HDDAC>; 216 + }; 216 217 217 - sti-dvo@8d00400 { 218 - compatible = "st,stih407-dvo"; 219 - reg = <0x8d00400 0x200>; 220 - reg-names = "dvo-reg"; 221 - clock-names = "dvo_pix", "dvo", 222 - "main_parent", "aux_parent"; 223 - clocks = <&clk_s_d2_flexgen CLK_PIX_DVO>, 
<&clk_s_d2_flexgen CLK_DVO>, 224 - <&clk_s_d2_quadfs 0>, <&clk_s_d2_quadfs 1>; 225 - pinctrl-names = "default"; 226 - pinctrl-0 = <&pinctrl_dvo>; 227 - sti,panel = <&panel_dvo>; 228 - }; 218 + sti-dvo@8d00400 { 219 + compatible = "st,stih407-dvo"; 220 + reg = <0x8d00400 0x200>; 221 + reg-names = "dvo-reg"; 222 + clock-names = "dvo_pix", "dvo", 223 + "main_parent", "aux_parent"; 224 + clocks = <&clk_s_d2_flexgen CLK_PIX_DVO>, <&clk_s_d2_flexgen CLK_DVO>, 225 + <&clk_s_d2_quadfs 0>, <&clk_s_d2_quadfs 1>; 226 + pinctrl-names = "default"; 227 + pinctrl-0 = <&pinctrl_dvo>; 228 + sti,panel = <&panel_dvo>; 229 229 }; 230 230 231 231 sti-hqvdp@9c000000 { ··· 235 237 reset-names = "hqvdp"; 236 238 resets = <&softreset STIH407_HDQVDP_SOFTRESET>; 237 239 st,vtg = <&vtg_main>; 238 - }; 240 + }; 239 241 }; 240 242 ... 241 243 };
+3 -4
drivers/gpu/drm/sti/Makefile
··· 1 1 sticompositor-y := \ 2 - sti_layer.o \ 3 2 sti_mixer.o \ 4 3 sti_gdp.o \ 5 4 sti_vid.o \ 6 5 sti_cursor.o \ 7 6 sti_compositor.o \ 8 - sti_drm_crtc.o \ 9 - sti_drm_plane.o 7 + sti_crtc.o \ 8 + sti_plane.o 10 9 11 10 stihdmi-y := sti_hdmi.o \ 12 11 sti_hdmi_tx3g0c55phy.o \ ··· 23 24 sticompositor.o \ 24 25 sti_hqvdp.o \ 25 26 stidvo.o \ 26 - sti_drm_drv.o 27 + sti_drv.o
+69 -74
drivers/gpu/drm/sti/sti_compositor.c
··· 14 14 #include <drm/drmP.h> 15 15 16 16 #include "sti_compositor.h" 17 - #include "sti_drm_crtc.h" 18 - #include "sti_drm_drv.h" 19 - #include "sti_drm_plane.h" 17 + #include "sti_crtc.h" 18 + #include "sti_cursor.h" 19 + #include "sti_drv.h" 20 20 #include "sti_gdp.h" 21 + #include "sti_plane.h" 22 + #include "sti_vid.h" 21 23 #include "sti_vtg.h" 22 24 23 25 /* ··· 33 31 {STI_GPD_SUBDEV, (int)STI_GDP_1, 0x200}, 34 32 {STI_GPD_SUBDEV, (int)STI_GDP_2, 0x300}, 35 33 {STI_GPD_SUBDEV, (int)STI_GDP_3, 0x400}, 36 - {STI_VID_SUBDEV, (int)STI_VID_0, 0x700}, 34 + {STI_VID_SUBDEV, (int)STI_HQVDP_0, 0x700}, 37 35 {STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00}, 38 36 {STI_MIXER_AUX_SUBDEV, STI_MIXER_AUX, 0xD00}, 39 37 }, ··· 55 53 }, 56 54 }; 57 55 58 - static int sti_compositor_init_subdev(struct sti_compositor *compo, 59 - struct sti_compositor_subdev_descriptor *desc, 60 - unsigned int array_size) 56 + static int sti_compositor_bind(struct device *dev, 57 + struct device *master, 58 + void *data) 61 59 { 62 - unsigned int i, mixer_id = 0, layer_id = 0; 60 + struct sti_compositor *compo = dev_get_drvdata(dev); 61 + struct drm_device *drm_dev = data; 62 + unsigned int i, mixer_id = 0, vid_id = 0, crtc_id = 0; 63 + struct sti_private *dev_priv = drm_dev->dev_private; 64 + struct drm_plane *cursor = NULL; 65 + struct drm_plane *primary = NULL; 66 + struct sti_compositor_subdev_descriptor *desc = compo->data.subdev_desc; 67 + unsigned int array_size = compo->data.nb_subdev; 63 68 69 + dev_priv->compo = compo; 70 + 71 + /* Register mixer subdev and video subdev first */ 64 72 for (i = 0; i < array_size; i++) { 65 73 switch (desc[i].type) { 74 + case STI_VID_SUBDEV: 75 + compo->vid[vid_id++] = 76 + sti_vid_create(compo->dev, desc[i].id, 77 + compo->regs + desc[i].offset); 78 + break; 66 79 case STI_MIXER_MAIN_SUBDEV: 67 80 case STI_MIXER_AUX_SUBDEV: 68 81 compo->mixer[mixer_id++] = ··· 85 68 compo->regs + desc[i].offset); 86 69 break; 87 70 case STI_GPD_SUBDEV: 88 - case 
STI_VID_SUBDEV: 89 71 case STI_CURSOR_SUBDEV: 90 - compo->layer[layer_id++] = 91 - sti_layer_create(compo->dev, desc[i].id, 92 - compo->regs + desc[i].offset); 72 + /* Nothing to do, wait for the second round */ 93 73 break; 94 74 default: 95 75 DRM_ERROR("Unknow subdev compoment type\n"); 96 76 return 1; 97 77 } 98 - 99 78 } 100 - compo->nb_mixers = mixer_id; 101 - compo->nb_layers = layer_id; 102 79 103 - return 0; 104 - } 80 + /* Register the other subdevs, create crtc and planes */ 81 + for (i = 0; i < array_size; i++) { 82 + enum drm_plane_type plane_type = DRM_PLANE_TYPE_OVERLAY; 105 83 106 - static int sti_compositor_bind(struct device *dev, struct device *master, 107 - void *data) 108 - { 109 - struct sti_compositor *compo = dev_get_drvdata(dev); 110 - struct drm_device *drm_dev = data; 111 - unsigned int i, crtc = 0, plane = 0; 112 - struct sti_drm_private *dev_priv = drm_dev->dev_private; 113 - struct drm_plane *cursor = NULL; 114 - struct drm_plane *primary = NULL; 84 + if (crtc_id < mixer_id) 85 + plane_type = DRM_PLANE_TYPE_PRIMARY; 115 86 116 - dev_priv->compo = compo; 117 - 118 - for (i = 0; i < compo->nb_layers; i++) { 119 - if (compo->layer[i]) { 120 - enum sti_layer_desc desc = compo->layer[i]->desc; 121 - enum sti_layer_type type = desc & STI_LAYER_TYPE_MASK; 122 - enum drm_plane_type plane_type = DRM_PLANE_TYPE_OVERLAY; 123 - 124 - if (crtc < compo->nb_mixers) 125 - plane_type = DRM_PLANE_TYPE_PRIMARY; 126 - 127 - switch (type) { 128 - case STI_CUR: 129 - cursor = sti_drm_plane_init(drm_dev, 130 - compo->layer[i], 131 - 1, DRM_PLANE_TYPE_CURSOR); 132 - break; 133 - case STI_GDP: 134 - case STI_VID: 135 - primary = sti_drm_plane_init(drm_dev, 136 - compo->layer[i], 137 - (1 << compo->nb_mixers) - 1, 138 - plane_type); 139 - plane++; 140 - break; 141 - case STI_BCK: 142 - case STI_VDP: 87 + switch (desc[i].type) { 88 + case STI_MIXER_MAIN_SUBDEV: 89 + case STI_MIXER_AUX_SUBDEV: 90 + case STI_VID_SUBDEV: 91 + /* Nothing to do, already done at the 
first round */ 92 + break; 93 + case STI_CURSOR_SUBDEV: 94 + cursor = sti_cursor_create(drm_dev, compo->dev, 95 + desc[i].id, 96 + compo->regs + desc[i].offset, 97 + 1); 98 + if (!cursor) { 99 + DRM_ERROR("Can't create CURSOR plane\n"); 143 100 break; 144 101 } 145 - 146 - /* The first planes are reserved for primary planes*/ 147 - if (crtc < compo->nb_mixers && primary) { 148 - sti_drm_crtc_init(drm_dev, compo->mixer[crtc], 149 - primary, cursor); 150 - crtc++; 151 - cursor = NULL; 152 - primary = NULL; 102 + break; 103 + case STI_GPD_SUBDEV: 104 + primary = sti_gdp_create(drm_dev, compo->dev, 105 + desc[i].id, 106 + compo->regs + desc[i].offset, 107 + (1 << mixer_id) - 1, 108 + plane_type); 109 + if (!primary) { 110 + DRM_ERROR("Can't create GDP plane\n"); 111 + break; 153 112 } 113 + break; 114 + default: 115 + DRM_ERROR("Unknown subdev compoment type\n"); 116 + return 1; 117 + } 118 + 119 + /* The first planes are reserved for primary planes*/ 120 + if (crtc_id < mixer_id && primary) { 121 + sti_crtc_init(drm_dev, compo->mixer[crtc_id], 122 + primary, cursor); 123 + crtc_id++; 124 + cursor = NULL; 125 + primary = NULL; 154 126 } 155 127 } 156 128 157 - drm_vblank_init(drm_dev, crtc); 129 + drm_vblank_init(drm_dev, crtc_id); 158 130 /* Allow usage of vblank without having to call drm_irq_install */ 159 131 drm_dev->irq_enabled = 1; 160 - 161 - DRM_DEBUG_DRIVER("Initialized %d DRM CRTC(s) and %d DRM plane(s)\n", 162 - crtc, plane); 163 - DRM_DEBUG_DRIVER("DRM plane(s) for VID/VDP not created yet\n"); 164 132 165 133 return 0; 166 134 } ··· 181 179 struct device_node *vtg_np; 182 180 struct sti_compositor *compo; 183 181 struct resource *res; 184 - int err; 185 182 186 183 compo = devm_kzalloc(dev, sizeof(*compo), GFP_KERNEL); 187 184 if (!compo) { ··· 188 187 return -ENOMEM; 189 188 } 190 189 compo->dev = dev; 191 - compo->vtg_vblank_nb.notifier_call = sti_drm_crtc_vblank_cb; 190 + compo->vtg_vblank_nb.notifier_call = sti_crtc_vblank_cb; 192 191 193 192 /* 
populate data structure depending on compatibility */ 194 193 BUG_ON(!of_match_node(compositor_of_match, np)->data); ··· 251 250 vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 1); 252 251 if (vtg_np) 253 252 compo->vtg_aux = of_vtg_find(vtg_np); 254 - 255 - /* Initialize compositor subdevices */ 256 - err = sti_compositor_init_subdev(compo, compo->data.subdev_desc, 257 - compo->data.nb_subdev); 258 - if (err) 259 - return err; 260 253 261 254 platform_set_drvdata(pdev, compo); 262 255
+4 -8
drivers/gpu/drm/sti/sti_compositor.h
··· 12 12 #include <linux/clk.h> 13 13 #include <linux/kernel.h> 14 14 15 - #include "sti_layer.h" 16 15 #include "sti_mixer.h" 16 + #include "sti_plane.h" 17 17 18 18 #define WAIT_NEXT_VSYNC_MS 50 /*ms*/ 19 19 20 - #define STI_MAX_LAYER 8 21 20 #define STI_MAX_MIXER 2 21 + #define STI_MAX_VID 1 22 22 23 23 enum sti_compositor_subdev_type { 24 24 STI_MIXER_MAIN_SUBDEV, ··· 59 59 * @rst_main: reset control of the main path 60 60 * @rst_aux: reset control of the aux path 61 61 * @mixer: array of mixers 62 + * @vid: array of vids 62 63 * @vtg_main: vtg for main data path 63 64 * @vtg_aux: vtg for auxillary data path 64 - * @layer: array of layers 65 - * @nb_mixers: number of mixers for this compositor 66 - * @nb_layers: number of layers (GDP,VID,...) for this compositor 67 65 * @vtg_vblank_nb: callback for VTG VSYNC notification 68 66 */ 69 67 struct sti_compositor { ··· 75 77 struct reset_control *rst_main; 76 78 struct reset_control *rst_aux; 77 79 struct sti_mixer *mixer[STI_MAX_MIXER]; 80 + struct sti_vid *vid[STI_MAX_VID]; 78 81 struct sti_vtg *vtg_main; 79 82 struct sti_vtg *vtg_aux; 80 - struct sti_layer *layer[STI_MAX_LAYER]; 81 - int nb_mixers; 82 - int nb_layers; 83 83 struct notifier_block vtg_vblank_nb; 84 84 }; 85 85
+22
drivers/gpu/drm/sti/sti_crtc.h
··· 1 + /* 2 + * Copyright (C) STMicroelectronics SA 2014 3 + * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics. 4 + * License terms: GNU General Public License (GPL), version 2 5 + */ 6 + 7 + #ifndef _STI_CRTC_H_ 8 + #define _STI_CRTC_H_ 9 + 10 + #include <drm/drmP.h> 11 + 12 + struct sti_mixer; 13 + 14 + int sti_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer, 15 + struct drm_plane *primary, struct drm_plane *cursor); 16 + int sti_crtc_enable_vblank(struct drm_device *dev, int crtc); 17 + void sti_crtc_disable_vblank(struct drm_device *dev, int crtc); 18 + int sti_crtc_vblank_cb(struct notifier_block *nb, 19 + unsigned long event, void *data); 20 + bool sti_crtc_is_main(struct drm_crtc *drm_crtc); 21 + 22 + #endif
+183 -132
drivers/gpu/drm/sti/sti_cursor.c
··· 7 7 */ 8 8 #include <drm/drmP.h> 9 9 10 + #include <drm/drm_atomic_helper.h> 11 + #include <drm/drm_fb_cma_helper.h> 12 + #include <drm/drm_gem_cma_helper.h> 13 + #include <drm/drm_plane_helper.h> 14 + 15 + #include "sti_compositor.h" 10 16 #include "sti_cursor.h" 11 - #include "sti_layer.h" 17 + #include "sti_plane.h" 12 18 #include "sti_vtg.h" 13 19 14 20 /* Registers */ ··· 48 42 /** 49 43 * STI Cursor structure 50 44 * 51 - * @layer: layer structure 52 - * @width: cursor width 53 - * @height: cursor height 54 - * @clut: color look up table 55 - * @clut_paddr: color look up table physical address 56 - * @pixmap: pixmap dma buffer (clut8-format cursor) 45 + * @sti_plane: sti_plane structure 46 + * @dev: driver device 47 + * @regs: cursor registers 48 + * @width: cursor width 49 + * @height: cursor height 50 + * @clut: color look up table 51 + * @clut_paddr: color look up table physical address 52 + * @pixmap: pixmap dma buffer (clut8-format cursor) 57 53 */ 58 54 struct sti_cursor { 59 - struct sti_layer layer; 55 + struct sti_plane plane; 56 + struct device *dev; 57 + void __iomem *regs; 60 58 unsigned int width; 61 59 unsigned int height; 62 60 unsigned short *clut; ··· 72 62 DRM_FORMAT_ARGB8888, 73 63 }; 74 64 75 - #define to_sti_cursor(x) container_of(x, struct sti_cursor, layer) 65 + #define to_sti_cursor(x) container_of(x, struct sti_cursor, plane) 76 66 77 - static const uint32_t *sti_cursor_get_formats(struct sti_layer *layer) 67 + static void sti_cursor_argb8888_to_clut8(struct sti_cursor *cursor, u32 *src) 78 68 { 79 - return cursor_supported_formats; 80 - } 81 - 82 - static unsigned int sti_cursor_get_nb_formats(struct sti_layer *layer) 83 - { 84 - return ARRAY_SIZE(cursor_supported_formats); 85 - } 86 - 87 - static void sti_cursor_argb8888_to_clut8(struct sti_layer *layer) 88 - { 89 - struct sti_cursor *cursor = to_sti_cursor(layer); 90 - u32 *src = layer->vaddr; 91 69 u8 *dst = cursor->pixmap.base; 92 70 unsigned int i, j; 93 71 u32 a, r, g, b; 
··· 94 96 } 95 97 } 96 98 97 - static int sti_cursor_prepare_layer(struct sti_layer *layer, bool first_prepare) 99 + static void sti_cursor_init(struct sti_cursor *cursor) 98 100 { 99 - struct sti_cursor *cursor = to_sti_cursor(layer); 100 - struct drm_display_mode *mode = layer->mode; 101 - u32 y, x; 102 - u32 val; 103 - 104 - DRM_DEBUG_DRIVER("\n"); 105 - 106 - dev_dbg(layer->dev, "%s %s\n", __func__, sti_layer_to_str(layer)); 107 - 108 - if (layer->src_w < STI_CURS_MIN_SIZE || 109 - layer->src_h < STI_CURS_MIN_SIZE || 110 - layer->src_w > STI_CURS_MAX_SIZE || 111 - layer->src_h > STI_CURS_MAX_SIZE) { 112 - DRM_ERROR("Invalid cursor size (%dx%d)\n", 113 - layer->src_w, layer->src_h); 114 - return -EINVAL; 115 - } 116 - 117 - /* If the cursor size has changed, re-allocated the pixmap */ 118 - if (!cursor->pixmap.base || 119 - (cursor->width != layer->src_w) || 120 - (cursor->height != layer->src_h)) { 121 - cursor->width = layer->src_w; 122 - cursor->height = layer->src_h; 123 - 124 - if (cursor->pixmap.base) 125 - dma_free_writecombine(layer->dev, 126 - cursor->pixmap.size, 127 - cursor->pixmap.base, 128 - cursor->pixmap.paddr); 129 - 130 - cursor->pixmap.size = cursor->width * cursor->height; 131 - 132 - cursor->pixmap.base = dma_alloc_writecombine(layer->dev, 133 - cursor->pixmap.size, 134 - &cursor->pixmap.paddr, 135 - GFP_KERNEL | GFP_DMA); 136 - if (!cursor->pixmap.base) { 137 - DRM_ERROR("Failed to allocate memory for pixmap\n"); 138 - return -ENOMEM; 139 - } 140 - } 141 - 142 - /* Convert ARGB8888 to CLUT8 */ 143 - sti_cursor_argb8888_to_clut8(layer); 144 - 145 - /* AWS and AWE depend on the mode */ 146 - y = sti_vtg_get_line_number(*mode, 0); 147 - x = sti_vtg_get_pixel_number(*mode, 0); 148 - val = y << 16 | x; 149 - writel(val, layer->regs + CUR_AWS); 150 - y = sti_vtg_get_line_number(*mode, mode->vdisplay - 1); 151 - x = sti_vtg_get_pixel_number(*mode, mode->hdisplay - 1); 152 - val = y << 16 | x; 153 - writel(val, layer->regs + CUR_AWE); 154 - 155 - 
if (first_prepare) { 156 - /* Set and fetch CLUT */ 157 - writel(cursor->clut_paddr, layer->regs + CUR_CML); 158 - writel(CUR_CTL_CLUT_UPDATE, layer->regs + CUR_CTL); 159 - } 160 - 161 - return 0; 162 - } 163 - 164 - static int sti_cursor_commit_layer(struct sti_layer *layer) 165 - { 166 - struct sti_cursor *cursor = to_sti_cursor(layer); 167 - struct drm_display_mode *mode = layer->mode; 168 - u32 ydo, xdo; 169 - 170 - dev_dbg(layer->dev, "%s %s\n", __func__, sti_layer_to_str(layer)); 171 - 172 - /* Set memory location, size, and position */ 173 - writel(cursor->pixmap.paddr, layer->regs + CUR_PML); 174 - writel(cursor->width, layer->regs + CUR_PMP); 175 - writel(cursor->height << 16 | cursor->width, layer->regs + CUR_SIZE); 176 - 177 - ydo = sti_vtg_get_line_number(*mode, layer->dst_y); 178 - xdo = sti_vtg_get_pixel_number(*mode, layer->dst_y); 179 - writel((ydo << 16) | xdo, layer->regs + CUR_VPO); 180 - 181 - return 0; 182 - } 183 - 184 - static int sti_cursor_disable_layer(struct sti_layer *layer) 185 - { 186 - return 0; 187 - } 188 - 189 - static void sti_cursor_init(struct sti_layer *layer) 190 - { 191 - struct sti_cursor *cursor = to_sti_cursor(layer); 192 101 unsigned short *base = cursor->clut; 193 102 unsigned int a, r, g, b; 194 103 ··· 110 205 (b * 5); 111 206 } 112 207 113 - static const struct sti_layer_funcs cursor_ops = { 114 - .get_formats = sti_cursor_get_formats, 115 - .get_nb_formats = sti_cursor_get_nb_formats, 116 - .init = sti_cursor_init, 117 - .prepare = sti_cursor_prepare_layer, 118 - .commit = sti_cursor_commit_layer, 119 - .disable = sti_cursor_disable_layer, 208 + static void sti_cursor_atomic_update(struct drm_plane *drm_plane, 209 + struct drm_plane_state *oldstate) 210 + { 211 + struct drm_plane_state *state = drm_plane->state; 212 + struct sti_plane *plane = to_sti_plane(drm_plane); 213 + struct sti_cursor *cursor = to_sti_cursor(plane); 214 + struct drm_crtc *crtc = state->crtc; 215 + struct sti_mixer *mixer = to_sti_mixer(crtc); 
216 + struct drm_framebuffer *fb = state->fb; 217 + struct drm_display_mode *mode = &crtc->mode; 218 + int dst_x = state->crtc_x; 219 + int dst_y = state->crtc_y; 220 + int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x); 221 + int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y); 222 + /* src_x are in 16.16 format */ 223 + int src_w = state->src_w >> 16; 224 + int src_h = state->src_h >> 16; 225 + bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false; 226 + struct drm_gem_cma_object *cma_obj; 227 + u32 y, x; 228 + u32 val; 229 + 230 + DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n", 231 + crtc->base.id, sti_mixer_to_str(mixer), 232 + drm_plane->base.id, sti_plane_to_str(plane)); 233 + DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", dst_w, dst_h, dst_x, dst_y); 234 + 235 + dev_dbg(cursor->dev, "%s %s\n", __func__, 236 + sti_plane_to_str(plane)); 237 + 238 + if (src_w < STI_CURS_MIN_SIZE || 239 + src_h < STI_CURS_MIN_SIZE || 240 + src_w > STI_CURS_MAX_SIZE || 241 + src_h > STI_CURS_MAX_SIZE) { 242 + DRM_ERROR("Invalid cursor size (%dx%d)\n", 243 + src_w, src_h); 244 + return; 245 + } 246 + 247 + /* If the cursor size has changed, re-allocated the pixmap */ 248 + if (!cursor->pixmap.base || 249 + (cursor->width != src_w) || 250 + (cursor->height != src_h)) { 251 + cursor->width = src_w; 252 + cursor->height = src_h; 253 + 254 + if (cursor->pixmap.base) 255 + dma_free_writecombine(cursor->dev, 256 + cursor->pixmap.size, 257 + cursor->pixmap.base, 258 + cursor->pixmap.paddr); 259 + 260 + cursor->pixmap.size = cursor->width * cursor->height; 261 + 262 + cursor->pixmap.base = dma_alloc_writecombine(cursor->dev, 263 + cursor->pixmap.size, 264 + &cursor->pixmap.paddr, 265 + GFP_KERNEL | GFP_DMA); 266 + if (!cursor->pixmap.base) { 267 + DRM_ERROR("Failed to allocate memory for pixmap\n"); 268 + return; 269 + } 270 + } 271 + 272 + cma_obj = drm_fb_cma_get_gem_obj(fb, 0); 273 + if (!cma_obj) { 274 + DRM_ERROR("Can't get CMA GEM object 
for fb\n"); 275 + return; 276 + } 277 + 278 + /* Convert ARGB8888 to CLUT8 */ 279 + sti_cursor_argb8888_to_clut8(cursor, (u32 *)cma_obj->vaddr); 280 + 281 + /* AWS and AWE depend on the mode */ 282 + y = sti_vtg_get_line_number(*mode, 0); 283 + x = sti_vtg_get_pixel_number(*mode, 0); 284 + val = y << 16 | x; 285 + writel(val, cursor->regs + CUR_AWS); 286 + y = sti_vtg_get_line_number(*mode, mode->vdisplay - 1); 287 + x = sti_vtg_get_pixel_number(*mode, mode->hdisplay - 1); 288 + val = y << 16 | x; 289 + writel(val, cursor->regs + CUR_AWE); 290 + 291 + if (first_prepare) { 292 + /* Set and fetch CLUT */ 293 + writel(cursor->clut_paddr, cursor->regs + CUR_CML); 294 + writel(CUR_CTL_CLUT_UPDATE, cursor->regs + CUR_CTL); 295 + } 296 + 297 + /* Set memory location, size, and position */ 298 + writel(cursor->pixmap.paddr, cursor->regs + CUR_PML); 299 + writel(cursor->width, cursor->regs + CUR_PMP); 300 + writel(cursor->height << 16 | cursor->width, cursor->regs + CUR_SIZE); 301 + 302 + y = sti_vtg_get_line_number(*mode, dst_y); 303 + x = sti_vtg_get_pixel_number(*mode, dst_y); 304 + writel((y << 16) | x, cursor->regs + CUR_VPO); 305 + 306 + plane->status = STI_PLANE_UPDATED; 307 + } 308 + 309 + static void sti_cursor_atomic_disable(struct drm_plane *drm_plane, 310 + struct drm_plane_state *oldstate) 311 + { 312 + struct sti_plane *plane = to_sti_plane(drm_plane); 313 + struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc); 314 + 315 + if (!drm_plane->crtc) { 316 + DRM_DEBUG_DRIVER("drm plane:%d not enabled\n", 317 + drm_plane->base.id); 318 + return; 319 + } 320 + 321 + DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n", 322 + drm_plane->crtc->base.id, sti_mixer_to_str(mixer), 323 + drm_plane->base.id, sti_plane_to_str(plane)); 324 + 325 + plane->status = STI_PLANE_DISABLING; 326 + } 327 + 328 + static const struct drm_plane_helper_funcs sti_cursor_helpers_funcs = { 329 + .atomic_update = sti_cursor_atomic_update, 330 + .atomic_disable = sti_cursor_atomic_disable, 120 
331 }; 121 332 122 - struct sti_layer *sti_cursor_create(struct device *dev) 333 + struct drm_plane *sti_cursor_create(struct drm_device *drm_dev, 334 + struct device *dev, int desc, 335 + void __iomem *baseaddr, 336 + unsigned int possible_crtcs) 123 337 { 124 338 struct sti_cursor *cursor; 339 + size_t size; 340 + int res; 125 341 126 342 cursor = devm_kzalloc(dev, sizeof(*cursor), GFP_KERNEL); 127 343 if (!cursor) { ··· 251 225 } 252 226 253 227 /* Allocate clut buffer */ 254 - cursor->clut = dma_alloc_writecombine(dev, 255 - 0x100 * sizeof(unsigned short), 256 - &cursor->clut_paddr, 257 - GFP_KERNEL | GFP_DMA); 228 + size = 0x100 * sizeof(unsigned short); 229 + cursor->clut = dma_alloc_writecombine(dev, size, &cursor->clut_paddr, 230 + GFP_KERNEL | GFP_DMA); 258 231 259 232 if (!cursor->clut) { 260 233 DRM_ERROR("Failed to allocate memory for cursor clut\n"); 261 - devm_kfree(dev, cursor); 262 - return NULL; 234 + goto err_clut; 263 235 } 264 236 265 - cursor->layer.ops = &cursor_ops; 237 + cursor->dev = dev; 238 + cursor->regs = baseaddr; 239 + cursor->plane.desc = desc; 240 + cursor->plane.status = STI_PLANE_DISABLED; 266 241 267 - return (struct sti_layer *)cursor; 242 + sti_cursor_init(cursor); 243 + 244 + res = drm_universal_plane_init(drm_dev, &cursor->plane.drm_plane, 245 + possible_crtcs, 246 + &sti_plane_helpers_funcs, 247 + cursor_supported_formats, 248 + ARRAY_SIZE(cursor_supported_formats), 249 + DRM_PLANE_TYPE_CURSOR); 250 + if (res) { 251 + DRM_ERROR("Failed to initialize universal plane\n"); 252 + goto err_plane; 253 + } 254 + 255 + drm_plane_helper_add(&cursor->plane.drm_plane, 256 + &sti_cursor_helpers_funcs); 257 + 258 + sti_plane_init_property(&cursor->plane, DRM_PLANE_TYPE_CURSOR); 259 + 260 + return &cursor->plane.drm_plane; 261 + 262 + err_plane: 263 + dma_free_writecombine(dev, size, cursor->clut, cursor->clut_paddr); 264 + err_clut: 265 + devm_kfree(dev, cursor); 266 + return NULL; 268 267 }
+4 -1
drivers/gpu/drm/sti/sti_cursor.h
··· 7 7 #ifndef _STI_CURSOR_H_ 8 8 #define _STI_CURSOR_H_ 9 9 10 - struct sti_layer *sti_cursor_create(struct device *dev); 10 + struct drm_plane *sti_cursor_create(struct drm_device *drm_dev, 11 + struct device *dev, int desc, 12 + void __iomem *baseaddr, 13 + unsigned int possible_crtcs); 11 14 12 15 #endif
+140 -83
drivers/gpu/drm/sti/sti_drm_crtc.c drivers/gpu/drm/sti/sti_crtc.c
··· 15 15 #include <drm/drm_plane_helper.h> 16 16 17 17 #include "sti_compositor.h" 18 - #include "sti_drm_drv.h" 19 - #include "sti_drm_crtc.h" 18 + #include "sti_crtc.h" 19 + #include "sti_drv.h" 20 + #include "sti_vid.h" 20 21 #include "sti_vtg.h" 21 22 22 - static void sti_drm_crtc_dpms(struct drm_crtc *crtc, int mode) 23 - { 24 - DRM_DEBUG_KMS("\n"); 25 - } 26 - 27 - static void sti_drm_crtc_prepare(struct drm_crtc *crtc) 23 + static void sti_crtc_enable(struct drm_crtc *crtc) 28 24 { 29 25 struct sti_mixer *mixer = to_sti_mixer(crtc); 30 26 struct device *dev = mixer->dev; 31 27 struct sti_compositor *compo = dev_get_drvdata(dev); 32 28 33 - mixer->enabled = true; 29 + DRM_DEBUG_DRIVER("\n"); 30 + 31 + mixer->status = STI_MIXER_READY; 34 32 35 33 /* Prepare and enable the compo IP clock */ 36 34 if (mixer->id == STI_MIXER_MAIN) { ··· 39 41 DRM_INFO("Failed to prepare/enable compo_aux clk\n"); 40 42 } 41 43 42 - sti_mixer_clear_all_layers(mixer); 43 - } 44 - 45 - static void sti_drm_crtc_commit(struct drm_crtc *crtc) 46 - { 47 - struct sti_mixer *mixer = to_sti_mixer(crtc); 48 - struct device *dev = mixer->dev; 49 - struct sti_compositor *compo = dev_get_drvdata(dev); 50 - struct sti_layer *layer; 51 - 52 - if ((!mixer || !compo)) { 53 - DRM_ERROR("Can not find mixer or compositor)\n"); 54 - return; 55 - } 56 - 57 - /* get GDP which is reserved to the CRTC FB */ 58 - layer = to_sti_layer(crtc->primary); 59 - if (layer) 60 - sti_layer_commit(layer); 61 - else 62 - DRM_ERROR("Can not find CRTC dedicated plane (GDP0)\n"); 63 - 64 - /* Enable layer on mixer */ 65 - if (sti_mixer_set_layer_status(mixer, layer, true)) 66 - DRM_ERROR("Can not enable layer at mixer\n"); 67 - 68 44 drm_crtc_vblank_on(crtc); 69 45 } 70 46 71 - static bool sti_drm_crtc_mode_fixup(struct drm_crtc *crtc, 72 - const struct drm_display_mode *mode, 73 - struct drm_display_mode *adjusted_mode) 47 + static void sti_crtc_disabling(struct drm_crtc *crtc) 48 + { 49 + struct sti_mixer *mixer = 
to_sti_mixer(crtc); 50 + 51 + DRM_DEBUG_DRIVER("\n"); 52 + 53 + mixer->status = STI_MIXER_DISABLING; 54 + } 55 + 56 + static bool sti_crtc_mode_fixup(struct drm_crtc *crtc, 57 + const struct drm_display_mode *mode, 58 + struct drm_display_mode *adjusted_mode) 74 59 { 75 60 /* accept the provided drm_display_mode, do not fix it up */ 76 61 return true; 77 62 } 78 63 79 64 static int 80 - sti_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode) 65 + sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode) 81 66 { 82 67 struct sti_mixer *mixer = to_sti_mixer(crtc); 83 68 struct device *dev = mixer->dev; ··· 103 122 104 123 res = sti_mixer_active_video_area(mixer, &crtc->mode); 105 124 if (res) { 106 - DRM_ERROR("Can not set active video area\n"); 125 + DRM_ERROR("Can't set active video area\n"); 107 126 return -EINVAL; 108 127 } 109 128 110 129 return res; 111 130 } 112 131 113 - static void sti_drm_crtc_disable(struct drm_crtc *crtc) 132 + static void sti_crtc_disable(struct drm_crtc *crtc) 114 133 { 115 134 struct sti_mixer *mixer = to_sti_mixer(crtc); 116 135 struct device *dev = mixer->dev; 117 136 struct sti_compositor *compo = dev_get_drvdata(dev); 118 - 119 - if (!mixer->enabled) 120 - return; 121 137 122 138 DRM_DEBUG_KMS("CRTC:%d (%s)\n", crtc->base.id, sti_mixer_to_str(mixer)); 123 139 ··· 132 154 clk_disable_unprepare(compo->clk_compo_aux); 133 155 } 134 156 135 - mixer->enabled = false; 157 + mixer->status = STI_MIXER_DISABLED; 136 158 } 137 159 138 160 static void 139 - sti_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) 161 + sti_crtc_mode_set_nofb(struct drm_crtc *crtc) 140 162 { 141 - sti_drm_crtc_prepare(crtc); 142 - sti_drm_crtc_mode_set(crtc, &crtc->state->adjusted_mode); 163 + sti_crtc_enable(crtc); 164 + sti_crtc_mode_set(crtc, &crtc->state->adjusted_mode); 143 165 } 144 166 145 - static void sti_drm_atomic_begin(struct drm_crtc *crtc, 146 - struct drm_crtc_state *old_crtc_state) 167 + static void 
sti_crtc_atomic_begin(struct drm_crtc *crtc, 168 + struct drm_crtc_state *old_crtc_state) 147 169 { 148 170 struct sti_mixer *mixer = to_sti_mixer(crtc); 149 171 ··· 157 179 } 158 180 } 159 181 160 - static void sti_drm_atomic_flush(struct drm_crtc *crtc, 161 - struct drm_crtc_state *old_crtc_state) 182 + static void sti_crtc_atomic_flush(struct drm_crtc *crtc, 183 + struct drm_crtc_state *old_crtc_state) 162 184 { 185 + struct drm_device *drm_dev = crtc->dev; 186 + struct sti_mixer *mixer = to_sti_mixer(crtc); 187 + struct sti_compositor *compo = dev_get_drvdata(mixer->dev); 188 + struct drm_plane *p; 189 + 190 + DRM_DEBUG_DRIVER("\n"); 191 + 192 + /* perform plane actions */ 193 + list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) { 194 + struct sti_plane *plane = to_sti_plane(p); 195 + 196 + switch (plane->status) { 197 + case STI_PLANE_UPDATED: 198 + /* update planes tag as updated */ 199 + DRM_DEBUG_DRIVER("update plane %s\n", 200 + sti_plane_to_str(plane)); 201 + 202 + if (sti_mixer_set_plane_depth(mixer, plane)) { 203 + DRM_ERROR("Cannot set plane %s depth\n", 204 + sti_plane_to_str(plane)); 205 + break; 206 + } 207 + 208 + if (sti_mixer_set_plane_status(mixer, plane, true)) { 209 + DRM_ERROR("Cannot enable plane %s at mixer\n", 210 + sti_plane_to_str(plane)); 211 + break; 212 + } 213 + 214 + /* if plane is HQVDP_0 then commit the vid[0] */ 215 + if (plane->desc == STI_HQVDP_0) 216 + sti_vid_commit(compo->vid[0], p->state); 217 + 218 + plane->status = STI_PLANE_READY; 219 + 220 + break; 221 + case STI_PLANE_DISABLING: 222 + /* disabling sequence for planes tag as disabling */ 223 + DRM_DEBUG_DRIVER("disable plane %s from mixer\n", 224 + sti_plane_to_str(plane)); 225 + 226 + if (sti_mixer_set_plane_status(mixer, plane, false)) { 227 + DRM_ERROR("Cannot disable plane %s at mixer\n", 228 + sti_plane_to_str(plane)); 229 + continue; 230 + } 231 + 232 + if (plane->desc == STI_CURSOR) 233 + /* tag plane status for disabled */ 234 + plane->status = 
STI_PLANE_DISABLED; 235 + else 236 + /* tag plane status for flushing */ 237 + plane->status = STI_PLANE_FLUSHING; 238 + 239 + /* if plane is HQVDP_0 then disable the vid[0] */ 240 + if (plane->desc == STI_HQVDP_0) 241 + sti_vid_disable(compo->vid[0]); 242 + 243 + break; 244 + default: 245 + /* Other status case are not handled */ 246 + break; 247 + } 248 + } 163 249 } 164 250 165 251 static struct drm_crtc_helper_funcs sti_crtc_helper_funcs = { 166 - .dpms = sti_drm_crtc_dpms, 167 - .prepare = sti_drm_crtc_prepare, 168 - .commit = sti_drm_crtc_commit, 169 - .mode_fixup = sti_drm_crtc_mode_fixup, 252 + .enable = sti_crtc_enable, 253 + .disable = sti_crtc_disabling, 254 + .mode_fixup = sti_crtc_mode_fixup, 170 255 .mode_set = drm_helper_crtc_mode_set, 171 - .mode_set_nofb = sti_drm_crtc_mode_set_nofb, 256 + .mode_set_nofb = sti_crtc_mode_set_nofb, 172 257 .mode_set_base = drm_helper_crtc_mode_set_base, 173 - .disable = sti_drm_crtc_disable, 174 - .atomic_begin = sti_drm_atomic_begin, 175 - .atomic_flush = sti_drm_atomic_flush, 258 + .atomic_begin = sti_crtc_atomic_begin, 259 + .atomic_flush = sti_crtc_atomic_flush, 176 260 }; 177 261 178 - static void sti_drm_crtc_destroy(struct drm_crtc *crtc) 262 + static void sti_crtc_destroy(struct drm_crtc *crtc) 179 263 { 180 264 DRM_DEBUG_KMS("\n"); 181 265 drm_crtc_cleanup(crtc); 182 266 } 183 267 184 - static int sti_drm_crtc_set_property(struct drm_crtc *crtc, 185 - struct drm_property *property, 186 - uint64_t val) 268 + static int sti_crtc_set_property(struct drm_crtc *crtc, 269 + struct drm_property *property, 270 + uint64_t val) 187 271 { 188 272 DRM_DEBUG_KMS("\n"); 189 273 return 0; 190 274 } 191 275 192 - int sti_drm_crtc_vblank_cb(struct notifier_block *nb, 193 - unsigned long event, void *data) 276 + int sti_crtc_vblank_cb(struct notifier_block *nb, 277 + unsigned long event, void *data) 194 278 { 195 279 struct drm_device *drm_dev; 196 280 struct sti_compositor *compo = 197 281 container_of(nb, struct 
sti_compositor, vtg_vblank_nb); 198 282 int *crtc = data; 199 283 unsigned long flags; 200 - struct sti_drm_private *priv; 284 + struct sti_private *priv; 201 285 202 286 drm_dev = compo->mixer[*crtc]->drm_crtc.dev; 203 287 priv = drm_dev->dev_private; ··· 275 235 spin_lock_irqsave(&drm_dev->event_lock, flags); 276 236 if (compo->mixer[*crtc]->pending_event) { 277 237 drm_send_vblank_event(drm_dev, -1, 278 - compo->mixer[*crtc]->pending_event); 238 + compo->mixer[*crtc]->pending_event); 279 239 drm_vblank_put(drm_dev, *crtc); 280 240 compo->mixer[*crtc]->pending_event = NULL; 281 241 } 282 242 spin_unlock_irqrestore(&drm_dev->event_lock, flags); 283 243 244 + if (compo->mixer[*crtc]->status == STI_MIXER_DISABLING) { 245 + struct drm_plane *p; 246 + 247 + /* Disable mixer only if all overlay planes (GDP and VDP) 248 + * are disabled */ 249 + list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) { 250 + struct sti_plane *plane = to_sti_plane(p); 251 + 252 + if ((plane->desc & STI_PLANE_TYPE_MASK) <= STI_VDP) 253 + if (plane->status != STI_PLANE_DISABLED) 254 + return 0; 255 + } 256 + sti_crtc_disable(&compo->mixer[*crtc]->drm_crtc); 257 + } 258 + 284 259 return 0; 285 260 } 286 261 287 - int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc) 262 + int sti_crtc_enable_vblank(struct drm_device *dev, int crtc) 288 263 { 289 - struct sti_drm_private *dev_priv = dev->dev_private; 264 + struct sti_private *dev_priv = dev->dev_private; 290 265 struct sti_compositor *compo = dev_priv->compo; 291 266 struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb; 267 + 268 + DRM_DEBUG_DRIVER("\n"); 292 269 293 270 if (sti_vtg_register_client(crtc == STI_MIXER_MAIN ? 
294 271 compo->vtg_main : compo->vtg_aux, ··· 316 259 317 260 return 0; 318 261 } 319 - EXPORT_SYMBOL(sti_drm_crtc_enable_vblank); 262 + EXPORT_SYMBOL(sti_crtc_enable_vblank); 320 263 321 - void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc) 264 + void sti_crtc_disable_vblank(struct drm_device *drm_dev, int crtc) 322 265 { 323 - struct sti_drm_private *priv = dev->dev_private; 266 + struct sti_private *priv = drm_dev->dev_private; 324 267 struct sti_compositor *compo = priv->compo; 325 268 struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb; 326 269 ··· 332 275 333 276 /* free the resources of the pending requests */ 334 277 if (compo->mixer[crtc]->pending_event) { 335 - drm_vblank_put(dev, crtc); 278 + drm_vblank_put(drm_dev, crtc); 336 279 compo->mixer[crtc]->pending_event = NULL; 337 280 } 338 281 } 339 - EXPORT_SYMBOL(sti_drm_crtc_disable_vblank); 282 + EXPORT_SYMBOL(sti_crtc_disable_vblank); 340 283 341 284 static struct drm_crtc_funcs sti_crtc_funcs = { 342 285 .set_config = drm_atomic_helper_set_config, 343 286 .page_flip = drm_atomic_helper_page_flip, 344 - .destroy = sti_drm_crtc_destroy, 345 - .set_property = sti_drm_crtc_set_property, 287 + .destroy = sti_crtc_destroy, 288 + .set_property = sti_crtc_set_property, 346 289 .reset = drm_atomic_helper_crtc_reset, 347 290 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, 348 291 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, 349 292 }; 350 293 351 - bool sti_drm_crtc_is_main(struct drm_crtc *crtc) 294 + bool sti_crtc_is_main(struct drm_crtc *crtc) 352 295 { 353 296 struct sti_mixer *mixer = to_sti_mixer(crtc); 354 297 ··· 357 300 358 301 return false; 359 302 } 360 - EXPORT_SYMBOL(sti_drm_crtc_is_main); 303 + EXPORT_SYMBOL(sti_crtc_is_main); 361 304 362 - int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer, 363 - struct drm_plane *primary, struct drm_plane *cursor) 305 + int sti_crtc_init(struct drm_device *drm_dev, struct sti_mixer 
*mixer, 306 + struct drm_plane *primary, struct drm_plane *cursor) 364 307 { 365 308 struct drm_crtc *crtc = &mixer->drm_crtc; 366 309 int res; 367 310 368 311 res = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor, 369 - &sti_crtc_funcs); 312 + &sti_crtc_funcs); 370 313 if (res) { 371 - DRM_ERROR("Can not initialze CRTC\n"); 314 + DRM_ERROR("Can't initialze CRTC\n"); 372 315 return -EINVAL; 373 316 } 374 317
-22
drivers/gpu/drm/sti/sti_drm_crtc.h
··· 1 - /* 2 - * Copyright (C) STMicroelectronics SA 2014 3 - * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics. 4 - * License terms: GNU General Public License (GPL), version 2 5 - */ 6 - 7 - #ifndef _STI_DRM_CRTC_H_ 8 - #define _STI_DRM_CRTC_H_ 9 - 10 - #include <drm/drmP.h> 11 - 12 - struct sti_mixer; 13 - 14 - int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer, 15 - struct drm_plane *primary, struct drm_plane *cursor); 16 - int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc); 17 - void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc); 18 - int sti_drm_crtc_vblank_cb(struct notifier_block *nb, 19 - unsigned long event, void *data); 20 - bool sti_drm_crtc_is_main(struct drm_crtc *drm_crtc); 21 - 22 - #endif
+57 -90
drivers/gpu/drm/sti/sti_drm_drv.c drivers/gpu/drm/sti/sti_drv.c
··· 18 18 #include <drm/drm_gem_cma_helper.h> 19 19 #include <drm/drm_fb_cma_helper.h> 20 20 21 - #include "sti_drm_drv.h" 22 - #include "sti_drm_crtc.h" 21 + #include "sti_crtc.h" 22 + #include "sti_drv.h" 23 23 24 24 #define DRIVER_NAME "sti" 25 25 #define DRIVER_DESC "STMicroelectronics SoC DRM" ··· 30 30 #define STI_MAX_FB_HEIGHT 4096 31 31 #define STI_MAX_FB_WIDTH 4096 32 32 33 - static void sti_drm_atomic_schedule(struct sti_drm_private *private, 34 - struct drm_atomic_state *state) 33 + static void sti_atomic_schedule(struct sti_private *private, 34 + struct drm_atomic_state *state) 35 35 { 36 36 private->commit.state = state; 37 37 schedule_work(&private->commit.work); 38 38 } 39 39 40 - static void sti_drm_atomic_complete(struct sti_drm_private *private, 41 - struct drm_atomic_state *state) 40 + static void sti_atomic_complete(struct sti_private *private, 41 + struct drm_atomic_state *state) 42 42 { 43 43 struct drm_device *drm = private->drm_dev; 44 44 ··· 68 68 drm_atomic_state_free(state); 69 69 } 70 70 71 - static void sti_drm_atomic_work(struct work_struct *work) 71 + static void sti_atomic_work(struct work_struct *work) 72 72 { 73 - struct sti_drm_private *private = container_of(work, 74 - struct sti_drm_private, commit.work); 73 + struct sti_private *private = container_of(work, 74 + struct sti_private, commit.work); 75 75 76 - sti_drm_atomic_complete(private, private->commit.state); 76 + sti_atomic_complete(private, private->commit.state); 77 77 } 78 78 79 - static int sti_drm_atomic_commit(struct drm_device *drm, 80 - struct drm_atomic_state *state, bool async) 79 + static int sti_atomic_commit(struct drm_device *drm, 80 + struct drm_atomic_state *state, bool async) 81 81 { 82 - struct sti_drm_private *private = drm->dev_private; 82 + struct sti_private *private = drm->dev_private; 83 83 int err; 84 84 85 85 err = drm_atomic_helper_prepare_planes(drm, state); ··· 99 99 drm_atomic_helper_swap_state(drm, state); 100 100 101 101 if (async) 102 - 
sti_drm_atomic_schedule(private, state); 102 + sti_atomic_schedule(private, state); 103 103 else 104 - sti_drm_atomic_complete(private, state); 104 + sti_atomic_complete(private, state); 105 105 106 106 mutex_unlock(&private->commit.lock); 107 107 return 0; 108 108 } 109 109 110 - static struct drm_mode_config_funcs sti_drm_mode_config_funcs = { 110 + static struct drm_mode_config_funcs sti_mode_config_funcs = { 111 111 .fb_create = drm_fb_cma_create, 112 112 .atomic_check = drm_atomic_helper_check, 113 - .atomic_commit = sti_drm_atomic_commit, 113 + .atomic_commit = sti_atomic_commit, 114 114 }; 115 115 116 - static void sti_drm_mode_config_init(struct drm_device *dev) 116 + static void sti_mode_config_init(struct drm_device *dev) 117 117 { 118 118 dev->mode_config.min_width = 0; 119 119 dev->mode_config.min_height = 0; ··· 126 126 dev->mode_config.max_width = STI_MAX_FB_HEIGHT; 127 127 dev->mode_config.max_height = STI_MAX_FB_WIDTH; 128 128 129 - dev->mode_config.funcs = &sti_drm_mode_config_funcs; 129 + dev->mode_config.funcs = &sti_mode_config_funcs; 130 130 } 131 131 132 - static int sti_drm_load(struct drm_device *dev, unsigned long flags) 132 + static int sti_load(struct drm_device *dev, unsigned long flags) 133 133 { 134 - struct sti_drm_private *private; 134 + struct sti_private *private; 135 135 int ret; 136 136 137 - private = kzalloc(sizeof(struct sti_drm_private), GFP_KERNEL); 137 + private = kzalloc(sizeof(*private), GFP_KERNEL); 138 138 if (!private) { 139 139 DRM_ERROR("Failed to allocate private\n"); 140 140 return -ENOMEM; ··· 143 143 private->drm_dev = dev; 144 144 145 145 mutex_init(&private->commit.lock); 146 - INIT_WORK(&private->commit.work, sti_drm_atomic_work); 146 + INIT_WORK(&private->commit.work, sti_atomic_work); 147 147 148 148 drm_mode_config_init(dev); 149 149 drm_kms_helper_poll_init(dev); 150 150 151 - sti_drm_mode_config_init(dev); 151 + sti_mode_config_init(dev); 152 152 153 153 ret = component_bind_all(dev->dev, dev); 154 154 if 
(ret) { ··· 162 162 163 163 #ifdef CONFIG_DRM_STI_FBDEV 164 164 drm_fbdev_cma_init(dev, 32, 165 - dev->mode_config.num_crtc, 166 - dev->mode_config.num_connector); 165 + dev->mode_config.num_crtc, 166 + dev->mode_config.num_connector); 167 167 #endif 168 168 return 0; 169 169 } 170 170 171 - static const struct file_operations sti_drm_driver_fops = { 171 + static const struct file_operations sti_driver_fops = { 172 172 .owner = THIS_MODULE, 173 173 .open = drm_open, 174 174 .mmap = drm_gem_cma_mmap, ··· 181 181 .release = drm_release, 182 182 }; 183 183 184 - static struct dma_buf *sti_drm_gem_prime_export(struct drm_device *dev, 185 - struct drm_gem_object *obj, 186 - int flags) 184 + static struct dma_buf *sti_gem_prime_export(struct drm_device *dev, 185 + struct drm_gem_object *obj, 186 + int flags) 187 187 { 188 188 /* we want to be able to write in mmapped buffer */ 189 189 flags |= O_RDWR; 190 190 return drm_gem_prime_export(dev, obj, flags); 191 191 } 192 192 193 - static struct drm_driver sti_drm_driver = { 193 + static struct drm_driver sti_driver = { 194 194 .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET | 195 195 DRIVER_GEM | DRIVER_PRIME, 196 - .load = sti_drm_load, 196 + .load = sti_load, 197 197 .gem_free_object = drm_gem_cma_free_object, 198 198 .gem_vm_ops = &drm_gem_cma_vm_ops, 199 199 .dumb_create = drm_gem_cma_dumb_create, 200 200 .dumb_map_offset = drm_gem_cma_dumb_map_offset, 201 201 .dumb_destroy = drm_gem_dumb_destroy, 202 - .fops = &sti_drm_driver_fops, 202 + .fops = &sti_driver_fops, 203 203 204 204 .get_vblank_counter = drm_vblank_count, 205 - .enable_vblank = sti_drm_crtc_enable_vblank, 206 - .disable_vblank = sti_drm_crtc_disable_vblank, 205 + .enable_vblank = sti_crtc_enable_vblank, 206 + .disable_vblank = sti_crtc_disable_vblank, 207 207 208 208 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 209 209 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 210 - .gem_prime_export = sti_drm_gem_prime_export, 210 + .gem_prime_export = 
sti_gem_prime_export, 211 211 .gem_prime_import = drm_gem_prime_import, 212 212 .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, 213 213 .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table, ··· 227 227 return dev->of_node == data; 228 228 } 229 229 230 - static int sti_drm_bind(struct device *dev) 230 + static int sti_bind(struct device *dev) 231 231 { 232 - return drm_platform_init(&sti_drm_driver, to_platform_device(dev)); 232 + return drm_platform_init(&sti_driver, to_platform_device(dev)); 233 233 } 234 234 235 - static void sti_drm_unbind(struct device *dev) 235 + static void sti_unbind(struct device *dev) 236 236 { 237 237 drm_put_dev(dev_get_drvdata(dev)); 238 238 } 239 239 240 - static const struct component_master_ops sti_drm_ops = { 241 - .bind = sti_drm_bind, 242 - .unbind = sti_drm_unbind, 240 + static const struct component_master_ops sti_ops = { 241 + .bind = sti_bind, 242 + .unbind = sti_unbind, 243 243 }; 244 244 245 - static int sti_drm_master_probe(struct platform_device *pdev) 245 + static int sti_platform_probe(struct platform_device *pdev) 246 246 { 247 247 struct device *dev = &pdev->dev; 248 - struct device_node *node = dev->parent->of_node; 248 + struct device_node *node = dev->of_node; 249 249 struct device_node *child_np; 250 250 struct component_match *match = NULL; 251 251 252 252 dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); 253 + 254 + of_platform_populate(node, NULL, NULL, dev); 253 255 254 256 child_np = of_get_next_available_child(node, NULL); 255 257 ··· 261 259 child_np = of_get_next_available_child(node, child_np); 262 260 } 263 261 264 - return component_master_add_with_match(dev, &sti_drm_ops, match); 262 + return component_master_add_with_match(dev, &sti_ops, match); 265 263 } 266 264 267 - static int sti_drm_master_remove(struct platform_device *pdev) 265 + static int sti_platform_remove(struct platform_device *pdev) 268 266 { 269 - component_master_del(&pdev->dev, &sti_drm_ops); 270 - return 0; 271 - } 
272 - 273 - static struct platform_driver sti_drm_master_driver = { 274 - .probe = sti_drm_master_probe, 275 - .remove = sti_drm_master_remove, 276 - .driver = { 277 - .name = DRIVER_NAME "__master", 278 - }, 279 - }; 280 - 281 - static int sti_drm_platform_probe(struct platform_device *pdev) 282 - { 283 - struct device *dev = &pdev->dev; 284 - struct device_node *node = dev->of_node; 285 - struct platform_device *master; 286 - 287 - of_platform_populate(node, NULL, NULL, dev); 288 - 289 - platform_driver_register(&sti_drm_master_driver); 290 - master = platform_device_register_resndata(dev, 291 - DRIVER_NAME "__master", -1, 292 - NULL, 0, NULL, 0); 293 - if (IS_ERR(master)) 294 - return PTR_ERR(master); 295 - 296 - platform_set_drvdata(pdev, master); 297 - return 0; 298 - } 299 - 300 - static int sti_drm_platform_remove(struct platform_device *pdev) 301 - { 302 - struct platform_device *master = platform_get_drvdata(pdev); 303 - 267 + component_master_del(&pdev->dev, &sti_ops); 304 268 of_platform_depopulate(&pdev->dev); 305 - platform_device_unregister(master); 306 - platform_driver_unregister(&sti_drm_master_driver); 269 + 307 270 return 0; 308 271 } 309 272 310 - static const struct of_device_id sti_drm_dt_ids[] = { 273 + static const struct of_device_id sti_dt_ids[] = { 311 274 { .compatible = "st,sti-display-subsystem", }, 312 275 { /* end node */ }, 313 276 }; 314 - MODULE_DEVICE_TABLE(of, sti_drm_dt_ids); 277 + MODULE_DEVICE_TABLE(of, sti_dt_ids); 315 278 316 - static struct platform_driver sti_drm_platform_driver = { 317 - .probe = sti_drm_platform_probe, 318 - .remove = sti_drm_platform_remove, 279 + static struct platform_driver sti_platform_driver = { 280 + .probe = sti_platform_probe, 281 + .remove = sti_platform_remove, 319 282 .driver = { 320 283 .name = DRIVER_NAME, 321 - .of_match_table = sti_drm_dt_ids, 284 + .of_match_table = sti_dt_ids, 322 285 }, 323 286 }; 324 287 325 - module_platform_driver(sti_drm_platform_driver); 288 + 
module_platform_driver(sti_platform_driver); 326 289 327 290 MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>"); 328 291 MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
+3 -3
drivers/gpu/drm/sti/sti_drm_drv.h drivers/gpu/drm/sti/sti_drv.h
··· 4 4 * License terms: GNU General Public License (GPL), version 2 5 5 */ 6 6 7 - #ifndef _STI_DRM_DRV_H_ 8 - #define _STI_DRM_DRV_H_ 7 + #ifndef _STI_DRV_H_ 8 + #define _STI_DRV_H_ 9 9 10 10 #include <drm/drmP.h> 11 11 ··· 20 20 * @plane_zorder_property: z-order property for CRTC planes 21 21 * @drm_dev: drm device 22 22 */ 23 - struct sti_drm_private { 23 + struct sti_private { 24 24 struct sti_compositor *compo; 25 25 struct drm_property *plane_zorder_property; 26 26 struct drm_device *drm_dev;
-251
drivers/gpu/drm/sti/sti_drm_plane.c
··· 1 - /* 2 - * Copyright (C) STMicroelectronics SA 2014 3 - * Authors: Benjamin Gaignard <benjamin.gaignard@st.com> 4 - * Fabien Dessenne <fabien.dessenne@st.com> 5 - * for STMicroelectronics. 6 - * License terms: GNU General Public License (GPL), version 2 7 - */ 8 - 9 - #include <drm/drmP.h> 10 - #include <drm/drm_atomic_helper.h> 11 - #include <drm/drm_plane_helper.h> 12 - 13 - #include "sti_compositor.h" 14 - #include "sti_drm_drv.h" 15 - #include "sti_drm_plane.h" 16 - #include "sti_vtg.h" 17 - 18 - enum sti_layer_desc sti_layer_default_zorder[] = { 19 - STI_GDP_0, 20 - STI_VID_0, 21 - STI_GDP_1, 22 - STI_VID_1, 23 - STI_GDP_2, 24 - STI_GDP_3, 25 - }; 26 - 27 - /* (Background) < GDP0 < VID0 < GDP1 < VID1 < GDP2 < GDP3 < (ForeGround) */ 28 - 29 - static int 30 - sti_drm_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, 31 - struct drm_framebuffer *fb, int crtc_x, int crtc_y, 32 - unsigned int crtc_w, unsigned int crtc_h, 33 - uint32_t src_x, uint32_t src_y, 34 - uint32_t src_w, uint32_t src_h) 35 - { 36 - struct sti_layer *layer = to_sti_layer(plane); 37 - struct sti_mixer *mixer = to_sti_mixer(crtc); 38 - int res; 39 - 40 - DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n", 41 - crtc->base.id, sti_mixer_to_str(mixer), 42 - plane->base.id, sti_layer_to_str(layer)); 43 - DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", crtc_w, crtc_h, crtc_x, crtc_y); 44 - 45 - res = sti_mixer_set_layer_depth(mixer, layer); 46 - if (res) { 47 - DRM_ERROR("Can not set layer depth\n"); 48 - return res; 49 - } 50 - 51 - /* src_x are in 16.16 format. 
*/ 52 - res = sti_layer_prepare(layer, crtc, fb, 53 - &crtc->mode, mixer->id, 54 - crtc_x, crtc_y, crtc_w, crtc_h, 55 - src_x >> 16, src_y >> 16, 56 - src_w >> 16, src_h >> 16); 57 - if (res) { 58 - DRM_ERROR("Layer prepare failed\n"); 59 - return res; 60 - } 61 - 62 - res = sti_layer_commit(layer); 63 - if (res) { 64 - DRM_ERROR("Layer commit failed\n"); 65 - return res; 66 - } 67 - 68 - res = sti_mixer_set_layer_status(mixer, layer, true); 69 - if (res) { 70 - DRM_ERROR("Can not enable layer at mixer\n"); 71 - return res; 72 - } 73 - 74 - return 0; 75 - } 76 - 77 - static int sti_drm_disable_plane(struct drm_plane *plane) 78 - { 79 - struct sti_layer *layer; 80 - struct sti_mixer *mixer; 81 - int lay_res, mix_res; 82 - 83 - if (!plane->crtc) { 84 - DRM_DEBUG_DRIVER("drm plane:%d not enabled\n", plane->base.id); 85 - return 0; 86 - } 87 - layer = to_sti_layer(plane); 88 - mixer = to_sti_mixer(plane->crtc); 89 - 90 - DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n", 91 - plane->crtc->base.id, sti_mixer_to_str(mixer), 92 - plane->base.id, sti_layer_to_str(layer)); 93 - 94 - /* Disable layer at mixer level */ 95 - mix_res = sti_mixer_set_layer_status(mixer, layer, false); 96 - if (mix_res) 97 - DRM_ERROR("Can not disable layer at mixer\n"); 98 - 99 - /* Wait a while to be sure that a Vsync event is received */ 100 - msleep(WAIT_NEXT_VSYNC_MS); 101 - 102 - /* Then disable layer itself */ 103 - lay_res = sti_layer_disable(layer); 104 - if (lay_res) 105 - DRM_ERROR("Layer disable failed\n"); 106 - 107 - if (lay_res || mix_res) 108 - return -EINVAL; 109 - 110 - return 0; 111 - } 112 - 113 - static void sti_drm_plane_destroy(struct drm_plane *plane) 114 - { 115 - DRM_DEBUG_DRIVER("\n"); 116 - 117 - drm_plane_helper_disable(plane); 118 - drm_plane_cleanup(plane); 119 - } 120 - 121 - static int sti_drm_plane_set_property(struct drm_plane *plane, 122 - struct drm_property *property, 123 - uint64_t val) 124 - { 125 - struct drm_device *dev = plane->dev; 126 - struct 
sti_drm_private *private = dev->dev_private; 127 - struct sti_layer *layer = to_sti_layer(plane); 128 - 129 - DRM_DEBUG_DRIVER("\n"); 130 - 131 - if (property == private->plane_zorder_property) { 132 - layer->zorder = val; 133 - return 0; 134 - } 135 - 136 - return -EINVAL; 137 - } 138 - 139 - static struct drm_plane_funcs sti_drm_plane_funcs = { 140 - .update_plane = drm_atomic_helper_update_plane, 141 - .disable_plane = drm_atomic_helper_disable_plane, 142 - .destroy = sti_drm_plane_destroy, 143 - .set_property = sti_drm_plane_set_property, 144 - .reset = drm_atomic_helper_plane_reset, 145 - .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, 146 - .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, 147 - }; 148 - 149 - static int sti_drm_plane_prepare_fb(struct drm_plane *plane, 150 - struct drm_framebuffer *fb, 151 - const struct drm_plane_state *new_state) 152 - { 153 - return 0; 154 - } 155 - 156 - static void sti_drm_plane_cleanup_fb(struct drm_plane *plane, 157 - struct drm_framebuffer *fb, 158 - const struct drm_plane_state *old_fb) 159 - { 160 - } 161 - 162 - static int sti_drm_plane_atomic_check(struct drm_plane *plane, 163 - struct drm_plane_state *state) 164 - { 165 - return 0; 166 - } 167 - 168 - static void sti_drm_plane_atomic_update(struct drm_plane *plane, 169 - struct drm_plane_state *oldstate) 170 - { 171 - struct drm_plane_state *state = plane->state; 172 - 173 - sti_drm_update_plane(plane, state->crtc, state->fb, 174 - state->crtc_x, state->crtc_y, 175 - state->crtc_w, state->crtc_h, 176 - state->src_x, state->src_y, 177 - state->src_w, state->src_h); 178 - } 179 - 180 - static void sti_drm_plane_atomic_disable(struct drm_plane *plane, 181 - struct drm_plane_state *oldstate) 182 - { 183 - sti_drm_disable_plane(plane); 184 - } 185 - 186 - static const struct drm_plane_helper_funcs sti_drm_plane_helpers_funcs = { 187 - .prepare_fb = sti_drm_plane_prepare_fb, 188 - .cleanup_fb = sti_drm_plane_cleanup_fb, 189 - 
.atomic_check = sti_drm_plane_atomic_check, 190 - .atomic_update = sti_drm_plane_atomic_update, 191 - .atomic_disable = sti_drm_plane_atomic_disable, 192 - }; 193 - 194 - static void sti_drm_plane_attach_zorder_property(struct drm_plane *plane, 195 - uint64_t default_val) 196 - { 197 - struct drm_device *dev = plane->dev; 198 - struct sti_drm_private *private = dev->dev_private; 199 - struct drm_property *prop; 200 - struct sti_layer *layer = to_sti_layer(plane); 201 - 202 - prop = private->plane_zorder_property; 203 - if (!prop) { 204 - prop = drm_property_create_range(dev, 0, "zpos", 0, 205 - GAM_MIXER_NB_DEPTH_LEVEL - 1); 206 - if (!prop) 207 - return; 208 - 209 - private->plane_zorder_property = prop; 210 - } 211 - 212 - drm_object_attach_property(&plane->base, prop, default_val); 213 - layer->zorder = default_val; 214 - } 215 - 216 - struct drm_plane *sti_drm_plane_init(struct drm_device *dev, 217 - struct sti_layer *layer, 218 - unsigned int possible_crtcs, 219 - enum drm_plane_type type) 220 - { 221 - int err, i; 222 - uint64_t default_zorder = 0; 223 - 224 - err = drm_universal_plane_init(dev, &layer->plane, possible_crtcs, 225 - &sti_drm_plane_funcs, 226 - sti_layer_get_formats(layer), 227 - sti_layer_get_nb_formats(layer), type); 228 - if (err) { 229 - DRM_ERROR("Failed to initialize plane\n"); 230 - return NULL; 231 - } 232 - 233 - drm_plane_helper_add(&layer->plane, &sti_drm_plane_helpers_funcs); 234 - 235 - for (i = 0; i < ARRAY_SIZE(sti_layer_default_zorder); i++) 236 - if (sti_layer_default_zorder[i] == layer->desc) 237 - break; 238 - 239 - default_zorder = i + 1; 240 - 241 - if (type == DRM_PLANE_TYPE_OVERLAY) 242 - sti_drm_plane_attach_zorder_property(&layer->plane, 243 - default_zorder); 244 - 245 - DRM_DEBUG_DRIVER("drm plane:%d mapped to %s with zorder:%llu\n", 246 - layer->plane.base.id, 247 - sti_layer_to_str(layer), default_zorder); 248 - 249 - return &layer->plane; 250 - } 251 - EXPORT_SYMBOL(sti_drm_plane_init);
-18
drivers/gpu/drm/sti/sti_drm_plane.h
··· 1 - /* 2 - * Copyright (C) STMicroelectronics SA 2014 3 - * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics. 4 - * License terms: GNU General Public License (GPL), version 2 5 - */ 6 - 7 - #ifndef _STI_DRM_PLANE_H_ 8 - #define _STI_DRM_PLANE_H_ 9 - 10 - #include <drm/drmP.h> 11 - 12 - struct sti_layer; 13 - 14 - struct drm_plane *sti_drm_plane_init(struct drm_device *dev, 15 - struct sti_layer *layer, 16 - unsigned int possible_crtcs, 17 - enum drm_plane_type type); 18 - #endif
+300 -236
drivers/gpu/drm/sti/sti_gdp.c
··· 9 9 #include <linux/clk.h> 10 10 #include <linux/dma-mapping.h> 11 11 12 + #include <drm/drm_fb_cma_helper.h> 13 + #include <drm/drm_gem_cma_helper.h> 14 + 12 15 #include "sti_compositor.h" 13 16 #include "sti_gdp.h" 14 - #include "sti_layer.h" 17 + #include "sti_plane.h" 15 18 #include "sti_vtg.h" 16 19 17 20 #define ALPHASWITCH BIT(6) ··· 29 26 #define GDP_XBGR8888 (GDP_RGB888_32 | BIGNOTLITTLE | ALPHASWITCH) 30 27 #define GDP_ARGB8565 0x04 31 28 #define GDP_ARGB8888 0x05 32 - #define GDP_ABGR8888 (GDP_ARGB8888 | BIGNOTLITTLE | ALPHASWITCH) 29 + #define GDP_ABGR8888 (GDP_ARGB8888 | BIGNOTLITTLE | ALPHASWITCH) 33 30 #define GDP_ARGB1555 0x06 34 31 #define GDP_ARGB4444 0x07 35 32 #define GDP_CLUT8 0x0B ··· 56 53 #define GAM_GDP_PPT_IGNORE (BIT(1) | BIT(0)) 57 54 #define GAM_GDP_SIZE_MAX 0x7FF 58 55 59 - #define GDP_NODE_NB_BANK 2 60 - #define GDP_NODE_PER_FIELD 2 56 + #define GDP_NODE_NB_BANK 2 57 + #define GDP_NODE_PER_FIELD 2 61 58 62 59 struct sti_gdp_node { 63 60 u32 gam_gdp_ctl; ··· 88 85 /** 89 86 * STI GDP structure 90 87 * 91 - * @layer: layer structure 88 + * @sti_plane: sti_plane structure 89 + * @dev: driver device 90 + * @regs: gdp registers 92 91 * @clk_pix: pixel clock for the current gdp 93 92 * @clk_main_parent: gdp parent clock if main path used 94 93 * @clk_aux_parent: gdp parent clock if aux path used 95 94 * @vtg_field_nb: callback for VTG FIELD (top or bottom) notification 96 95 * @is_curr_top: true if the current node processed is the top field 97 - * @node_list: array of node list 96 + * @node_list: array of node list 98 97 */ 99 98 struct sti_gdp { 100 - struct sti_layer layer; 99 + struct sti_plane plane; 100 + struct device *dev; 101 + void __iomem *regs; 101 102 struct clk *clk_pix; 102 103 struct clk *clk_main_parent; 103 104 struct clk *clk_aux_parent; ··· 110 103 struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK]; 111 104 }; 112 105 113 - #define to_sti_gdp(x) container_of(x, struct sti_gdp, layer) 106 + #define to_sti_gdp(x) 
container_of(x, struct sti_gdp, plane) 114 107 115 108 static const uint32_t gdp_supported_formats[] = { 116 109 DRM_FORMAT_XRGB8888, ··· 126 119 DRM_FORMAT_VYUY, 127 120 DRM_FORMAT_C8, 128 121 }; 129 - 130 - static const uint32_t *sti_gdp_get_formats(struct sti_layer *layer) 131 - { 132 - return gdp_supported_formats; 133 - } 134 - 135 - static unsigned int sti_gdp_get_nb_formats(struct sti_layer *layer) 136 - { 137 - return ARRAY_SIZE(gdp_supported_formats); 138 - } 139 122 140 123 static int sti_gdp_fourcc2format(int fourcc) 141 124 { ··· 172 175 173 176 /** 174 177 * sti_gdp_get_free_nodes 175 - * @layer: gdp layer 178 + * @gdp: gdp pointer 176 179 * 177 180 * Look for a GDP node list that is not currently read by the HW. 178 181 * 179 182 * RETURNS: 180 183 * Pointer to the free GDP node list 181 184 */ 182 - static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer) 185 + static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_gdp *gdp) 183 186 { 184 187 int hw_nvn; 185 - struct sti_gdp *gdp = to_sti_gdp(layer); 186 188 unsigned int i; 187 189 188 - hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET); 190 + hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET); 189 191 if (!hw_nvn) 190 192 goto end; 191 193 ··· 195 199 196 200 /* in hazardious cases restart with the first node */ 197 201 DRM_ERROR("inconsistent NVN for %s: 0x%08X\n", 198 - sti_layer_to_str(layer), hw_nvn); 202 + sti_plane_to_str(&gdp->plane), hw_nvn); 199 203 200 204 end: 201 205 return &gdp->node_list[0]; ··· 203 207 204 208 /** 205 209 * sti_gdp_get_current_nodes 206 - * @layer: GDP layer 210 + * @gdp: gdp pointer 207 211 * 208 212 * Look for GDP nodes that are currently read by the HW. 
209 213 * ··· 211 215 * Pointer to the current GDP node list 212 216 */ 213 217 static 214 - struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer) 218 + struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_gdp *gdp) 215 219 { 216 220 int hw_nvn; 217 - struct sti_gdp *gdp = to_sti_gdp(layer); 218 221 unsigned int i; 219 222 220 - hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET); 223 + hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET); 221 224 if (!hw_nvn) 222 225 goto end; 223 226 ··· 227 232 228 233 end: 229 234 DRM_DEBUG_DRIVER("Warning, NVN 0x%08X for %s does not match any node\n", 230 - hw_nvn, sti_layer_to_str(layer)); 235 + hw_nvn, sti_plane_to_str(&gdp->plane)); 231 236 232 237 return NULL; 233 238 } 234 239 235 240 /** 236 - * sti_gdp_prepare_layer 237 - * @lay: gdp layer 238 - * @first_prepare: true if it is the first time this function is called 239 - * 240 - * Update the free GDP node list according to the layer properties. 241 - * 242 - * RETURNS: 243 - * 0 on success. 
244 - */ 245 - static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare) 246 - { 247 - struct sti_gdp_node_list *list; 248 - struct sti_gdp_node *top_field, *btm_field; 249 - struct drm_display_mode *mode = layer->mode; 250 - struct device *dev = layer->dev; 251 - struct sti_gdp *gdp = to_sti_gdp(layer); 252 - struct sti_compositor *compo = dev_get_drvdata(dev); 253 - int format; 254 - unsigned int depth, bpp; 255 - int rate = mode->clock * 1000; 256 - int res; 257 - u32 ydo, xdo, yds, xds; 258 - 259 - list = sti_gdp_get_free_nodes(layer); 260 - top_field = list->top_field; 261 - btm_field = list->btm_field; 262 - 263 - dev_dbg(dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__, 264 - sti_layer_to_str(layer), top_field, btm_field); 265 - 266 - /* Build the top field from layer params */ 267 - top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE; 268 - top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC; 269 - format = sti_gdp_fourcc2format(layer->format); 270 - if (format == -1) { 271 - DRM_ERROR("Format not supported by GDP %.4s\n", 272 - (char *)&layer->format); 273 - return 1; 274 - } 275 - top_field->gam_gdp_ctl |= format; 276 - top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format); 277 - top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE; 278 - 279 - /* pixel memory location */ 280 - drm_fb_get_bpp_depth(layer->format, &depth, &bpp); 281 - top_field->gam_gdp_pml = (u32) layer->paddr + layer->offsets[0]; 282 - top_field->gam_gdp_pml += layer->src_x * (bpp >> 3); 283 - top_field->gam_gdp_pml += layer->src_y * layer->pitches[0]; 284 - 285 - /* input parameters */ 286 - top_field->gam_gdp_pmp = layer->pitches[0]; 287 - top_field->gam_gdp_size = 288 - clamp_val(layer->src_h, 0, GAM_GDP_SIZE_MAX) << 16 | 289 - clamp_val(layer->src_w, 0, GAM_GDP_SIZE_MAX); 290 - 291 - /* output parameters */ 292 - ydo = sti_vtg_get_line_number(*mode, layer->dst_y); 293 - yds = sti_vtg_get_line_number(*mode, layer->dst_y + layer->dst_h - 1); 294 - xdo = 
sti_vtg_get_pixel_number(*mode, layer->dst_x); 295 - xds = sti_vtg_get_pixel_number(*mode, layer->dst_x + layer->dst_w - 1); 296 - top_field->gam_gdp_vpo = (ydo << 16) | xdo; 297 - top_field->gam_gdp_vps = (yds << 16) | xds; 298 - 299 - /* Same content and chained together */ 300 - memcpy(btm_field, top_field, sizeof(*btm_field)); 301 - top_field->gam_gdp_nvn = list->btm_field_paddr; 302 - btm_field->gam_gdp_nvn = list->top_field_paddr; 303 - 304 - /* Interlaced mode */ 305 - if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE) 306 - btm_field->gam_gdp_pml = top_field->gam_gdp_pml + 307 - layer->pitches[0]; 308 - 309 - if (first_prepare) { 310 - /* Register gdp callback */ 311 - if (sti_vtg_register_client(layer->mixer_id == STI_MIXER_MAIN ? 312 - compo->vtg_main : compo->vtg_aux, 313 - &gdp->vtg_field_nb, layer->mixer_id)) { 314 - DRM_ERROR("Cannot register VTG notifier\n"); 315 - return 1; 316 - } 317 - 318 - /* Set and enable gdp clock */ 319 - if (gdp->clk_pix) { 320 - struct clk *clkp; 321 - /* According to the mixer used, the gdp pixel clock 322 - * should have a different parent clock. */ 323 - if (layer->mixer_id == STI_MIXER_MAIN) 324 - clkp = gdp->clk_main_parent; 325 - else 326 - clkp = gdp->clk_aux_parent; 327 - 328 - if (clkp) 329 - clk_set_parent(gdp->clk_pix, clkp); 330 - 331 - res = clk_set_rate(gdp->clk_pix, rate); 332 - if (res < 0) { 333 - DRM_ERROR("Cannot set rate (%dHz) for gdp\n", 334 - rate); 335 - return 1; 336 - } 337 - 338 - if (clk_prepare_enable(gdp->clk_pix)) { 339 - DRM_ERROR("Failed to prepare/enable gdp\n"); 340 - return 1; 341 - } 342 - } 343 - } 344 - 345 - return 0; 346 - } 347 - 348 - /** 349 - * sti_gdp_commit_layer 350 - * @lay: gdp layer 351 - * 352 - * Update the NVN field of the 'right' field of the current GDP node (being 353 - * used by the HW) with the address of the updated ('free') top field GDP node. 
354 - * - In interlaced mode the 'right' field is the bottom field as we update 355 - * frames starting from their top field 356 - * - In progressive mode, we update both bottom and top fields which are 357 - * equal nodes. 358 - * At the next VSYNC, the updated node list will be used by the HW. 359 - * 360 - * RETURNS: 361 - * 0 on success. 362 - */ 363 - static int sti_gdp_commit_layer(struct sti_layer *layer) 364 - { 365 - struct sti_gdp_node_list *updated_list = sti_gdp_get_free_nodes(layer); 366 - struct sti_gdp_node *updated_top_node = updated_list->top_field; 367 - struct sti_gdp_node *updated_btm_node = updated_list->btm_field; 368 - struct sti_gdp *gdp = to_sti_gdp(layer); 369 - u32 dma_updated_top = updated_list->top_field_paddr; 370 - u32 dma_updated_btm = updated_list->btm_field_paddr; 371 - struct sti_gdp_node_list *curr_list = sti_gdp_get_current_nodes(layer); 372 - 373 - dev_dbg(layer->dev, "%s %s top/btm_node:0x%p/0x%p\n", __func__, 374 - sti_layer_to_str(layer), 375 - updated_top_node, updated_btm_node); 376 - dev_dbg(layer->dev, "Current NVN:0x%X\n", 377 - readl(layer->regs + GAM_GDP_NVN_OFFSET)); 378 - dev_dbg(layer->dev, "Posted buff: %lx current buff: %x\n", 379 - (unsigned long)layer->paddr, 380 - readl(layer->regs + GAM_GDP_PML_OFFSET)); 381 - 382 - if (curr_list == NULL) { 383 - /* First update or invalid node should directly write in the 384 - * hw register */ 385 - DRM_DEBUG_DRIVER("%s first update (or invalid node)", 386 - sti_layer_to_str(layer)); 387 - 388 - writel(gdp->is_curr_top == true ? 
389 - dma_updated_btm : dma_updated_top, 390 - layer->regs + GAM_GDP_NVN_OFFSET); 391 - return 0; 392 - } 393 - 394 - if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE) { 395 - if (gdp->is_curr_top == true) { 396 - /* Do not update in the middle of the frame, but 397 - * postpone the update after the bottom field has 398 - * been displayed */ 399 - curr_list->btm_field->gam_gdp_nvn = dma_updated_top; 400 - } else { 401 - /* Direct update to avoid one frame delay */ 402 - writel(dma_updated_top, 403 - layer->regs + GAM_GDP_NVN_OFFSET); 404 - } 405 - } else { 406 - /* Direct update for progressive to avoid one frame delay */ 407 - writel(dma_updated_top, layer->regs + GAM_GDP_NVN_OFFSET); 408 - } 409 - 410 - return 0; 411 - } 412 - 413 - /** 414 - * sti_gdp_disable_layer 415 - * @lay: gdp layer 241 + * sti_gdp_disable 242 + * @gdp: gdp pointer 416 243 * 417 244 * Disable a GDP. 418 - * 419 - * RETURNS: 420 - * 0 on success. 421 245 */ 422 - static int sti_gdp_disable_layer(struct sti_layer *layer) 246 + static void sti_gdp_disable(struct sti_gdp *gdp) 423 247 { 248 + struct drm_plane *drm_plane = &gdp->plane.drm_plane; 249 + struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc); 250 + struct sti_compositor *compo = dev_get_drvdata(gdp->dev); 424 251 unsigned int i; 425 - struct sti_gdp *gdp = to_sti_gdp(layer); 426 - struct sti_compositor *compo = dev_get_drvdata(layer->dev); 427 252 428 - DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer)); 253 + DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(&gdp->plane)); 429 254 430 255 /* Set the nodes as 'to be ignored on mixer' */ 431 256 for (i = 0; i < GDP_NODE_NB_BANK; i++) { ··· 253 438 gdp->node_list[i].btm_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE; 254 439 } 255 440 256 - if (sti_vtg_unregister_client(layer->mixer_id == STI_MIXER_MAIN ? 441 + if (sti_vtg_unregister_client(mixer->id == STI_MIXER_MAIN ? 
257 442 compo->vtg_main : compo->vtg_aux, &gdp->vtg_field_nb)) 258 443 DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n"); 259 444 260 445 if (gdp->clk_pix) 261 446 clk_disable_unprepare(gdp->clk_pix); 262 447 263 - return 0; 448 + gdp->plane.status = STI_PLANE_DISABLED; 264 449 } 265 450 266 451 /** ··· 279 464 { 280 465 struct sti_gdp *gdp = container_of(nb, struct sti_gdp, vtg_field_nb); 281 466 467 + if (gdp->plane.status == STI_PLANE_FLUSHING) { 468 + /* disable need to be synchronize on vsync event */ 469 + DRM_DEBUG_DRIVER("Vsync event received => disable %s\n", 470 + sti_plane_to_str(&gdp->plane)); 471 + 472 + sti_gdp_disable(gdp); 473 + } 474 + 282 475 switch (event) { 283 476 case VTG_TOP_FIELD_EVENT: 284 477 gdp->is_curr_top = true; ··· 302 479 return 0; 303 480 } 304 481 305 - static void sti_gdp_init(struct sti_layer *layer) 482 + static void sti_gdp_init(struct sti_gdp *gdp) 306 483 { 307 - struct sti_gdp *gdp = to_sti_gdp(layer); 308 - struct device_node *np = layer->dev->of_node; 484 + struct device_node *np = gdp->dev->of_node; 309 485 dma_addr_t dma_addr; 310 486 void *base; 311 487 unsigned int i, size; ··· 312 490 /* Allocate all the nodes within a single memory page */ 313 491 size = sizeof(struct sti_gdp_node) * 314 492 GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK; 315 - base = dma_alloc_writecombine(layer->dev, 316 - size, &dma_addr, GFP_KERNEL | GFP_DMA); 493 + base = dma_alloc_writecombine(gdp->dev, 494 + size, &dma_addr, GFP_KERNEL | GFP_DMA); 317 495 318 496 if (!base) { 319 497 DRM_ERROR("Failed to allocate memory for GDP node\n"); ··· 348 526 /* GDP of STiH407 chip have its own pixel clock */ 349 527 char *clk_name; 350 528 351 - switch (layer->desc) { 529 + switch (gdp->plane.desc) { 352 530 case STI_GDP_0: 353 531 clk_name = "pix_gdp1"; 354 532 break; ··· 366 544 return; 367 545 } 368 546 369 - gdp->clk_pix = devm_clk_get(layer->dev, clk_name); 547 + gdp->clk_pix = devm_clk_get(gdp->dev, clk_name); 370 548 if 
(IS_ERR(gdp->clk_pix)) 371 549 DRM_ERROR("Cannot get %s clock\n", clk_name); 372 550 373 - gdp->clk_main_parent = devm_clk_get(layer->dev, "main_parent"); 551 + gdp->clk_main_parent = devm_clk_get(gdp->dev, "main_parent"); 374 552 if (IS_ERR(gdp->clk_main_parent)) 375 553 DRM_ERROR("Cannot get main_parent clock\n"); 376 554 377 - gdp->clk_aux_parent = devm_clk_get(layer->dev, "aux_parent"); 555 + gdp->clk_aux_parent = devm_clk_get(gdp->dev, "aux_parent"); 378 556 if (IS_ERR(gdp->clk_aux_parent)) 379 557 DRM_ERROR("Cannot get aux_parent clock\n"); 380 558 } 381 559 } 382 560 383 - static const struct sti_layer_funcs gdp_ops = { 384 - .get_formats = sti_gdp_get_formats, 385 - .get_nb_formats = sti_gdp_get_nb_formats, 386 - .init = sti_gdp_init, 387 - .prepare = sti_gdp_prepare_layer, 388 - .commit = sti_gdp_commit_layer, 389 - .disable = sti_gdp_disable_layer, 561 + static void sti_gdp_atomic_update(struct drm_plane *drm_plane, 562 + struct drm_plane_state *oldstate) 563 + { 564 + struct drm_plane_state *state = drm_plane->state; 565 + struct sti_plane *plane = to_sti_plane(drm_plane); 566 + struct sti_gdp *gdp = to_sti_gdp(plane); 567 + struct drm_crtc *crtc = state->crtc; 568 + struct sti_compositor *compo = dev_get_drvdata(gdp->dev); 569 + struct drm_framebuffer *fb = state->fb; 570 + bool first_prepare = plane->status == STI_PLANE_DISABLED ? 
true : false; 571 + struct sti_mixer *mixer; 572 + struct drm_display_mode *mode; 573 + int dst_x, dst_y, dst_w, dst_h; 574 + int src_x, src_y, src_w, src_h; 575 + struct drm_gem_cma_object *cma_obj; 576 + struct sti_gdp_node_list *list; 577 + struct sti_gdp_node_list *curr_list; 578 + struct sti_gdp_node *top_field, *btm_field; 579 + u32 dma_updated_top; 580 + u32 dma_updated_btm; 581 + int format; 582 + unsigned int depth, bpp; 583 + u32 ydo, xdo, yds, xds; 584 + int res; 585 + 586 + /* Manage the case where crtc is null (disabled) */ 587 + if (!crtc) 588 + return; 589 + 590 + mixer = to_sti_mixer(crtc); 591 + mode = &crtc->mode; 592 + dst_x = state->crtc_x; 593 + dst_y = state->crtc_y; 594 + dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x); 595 + dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y); 596 + /* src_x are in 16.16 format */ 597 + src_x = state->src_x >> 16; 598 + src_y = state->src_y >> 16; 599 + src_w = state->src_w >> 16; 600 + src_h = state->src_h >> 16; 601 + 602 + DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n", 603 + crtc->base.id, sti_mixer_to_str(mixer), 604 + drm_plane->base.id, sti_plane_to_str(plane)); 605 + DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n", 606 + sti_plane_to_str(plane), 607 + dst_w, dst_h, dst_x, dst_y, 608 + src_w, src_h, src_x, src_y); 609 + 610 + list = sti_gdp_get_free_nodes(gdp); 611 + top_field = list->top_field; 612 + btm_field = list->btm_field; 613 + 614 + dev_dbg(gdp->dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__, 615 + sti_plane_to_str(plane), top_field, btm_field); 616 + 617 + /* build the top field */ 618 + top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE; 619 + top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC; 620 + format = sti_gdp_fourcc2format(fb->pixel_format); 621 + if (format == -1) { 622 + DRM_ERROR("Format not supported by GDP %.4s\n", 623 + (char *)&fb->pixel_format); 624 + return; 625 + } 626 + top_field->gam_gdp_ctl |= format; 627 + top_field->gam_gdp_ctl 
|= sti_gdp_get_alpharange(format); 628 + top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE; 629 + 630 + cma_obj = drm_fb_cma_get_gem_obj(fb, 0); 631 + if (!cma_obj) { 632 + DRM_ERROR("Can't get CMA GEM object for fb\n"); 633 + return; 634 + } 635 + 636 + DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id, 637 + (char *)&fb->pixel_format, 638 + (unsigned long)cma_obj->paddr); 639 + 640 + /* pixel memory location */ 641 + drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp); 642 + top_field->gam_gdp_pml = (u32)cma_obj->paddr + fb->offsets[0]; 643 + top_field->gam_gdp_pml += src_x * (bpp >> 3); 644 + top_field->gam_gdp_pml += src_y * fb->pitches[0]; 645 + 646 + /* input parameters */ 647 + top_field->gam_gdp_pmp = fb->pitches[0]; 648 + top_field->gam_gdp_size = clamp_val(src_h, 0, GAM_GDP_SIZE_MAX) << 16 | 649 + clamp_val(src_w, 0, GAM_GDP_SIZE_MAX); 650 + 651 + /* output parameters */ 652 + ydo = sti_vtg_get_line_number(*mode, dst_y); 653 + yds = sti_vtg_get_line_number(*mode, dst_y + dst_h - 1); 654 + xdo = sti_vtg_get_pixel_number(*mode, dst_x); 655 + xds = sti_vtg_get_pixel_number(*mode, dst_x + dst_w - 1); 656 + top_field->gam_gdp_vpo = (ydo << 16) | xdo; 657 + top_field->gam_gdp_vps = (yds << 16) | xds; 658 + 659 + /* Same content and chained together */ 660 + memcpy(btm_field, top_field, sizeof(*btm_field)); 661 + top_field->gam_gdp_nvn = list->btm_field_paddr; 662 + btm_field->gam_gdp_nvn = list->top_field_paddr; 663 + 664 + /* Interlaced mode */ 665 + if (mode->flags & DRM_MODE_FLAG_INTERLACE) 666 + btm_field->gam_gdp_pml = top_field->gam_gdp_pml + 667 + fb->pitches[0]; 668 + 669 + if (first_prepare) { 670 + /* Register gdp callback */ 671 + if (sti_vtg_register_client(mixer->id == STI_MIXER_MAIN ? 
672 + compo->vtg_main : compo->vtg_aux, 673 + &gdp->vtg_field_nb, mixer->id)) { 674 + DRM_ERROR("Cannot register VTG notifier\n"); 675 + return; 676 + } 677 + 678 + /* Set and enable gdp clock */ 679 + if (gdp->clk_pix) { 680 + struct clk *clkp; 681 + int rate = mode->clock * 1000; 682 + 683 + /* According to the mixer used, the gdp pixel clock 684 + * should have a different parent clock. */ 685 + if (mixer->id == STI_MIXER_MAIN) 686 + clkp = gdp->clk_main_parent; 687 + else 688 + clkp = gdp->clk_aux_parent; 689 + 690 + if (clkp) 691 + clk_set_parent(gdp->clk_pix, clkp); 692 + 693 + res = clk_set_rate(gdp->clk_pix, rate); 694 + if (res < 0) { 695 + DRM_ERROR("Cannot set rate (%dHz) for gdp\n", 696 + rate); 697 + return; 698 + } 699 + 700 + if (clk_prepare_enable(gdp->clk_pix)) { 701 + DRM_ERROR("Failed to prepare/enable gdp\n"); 702 + return; 703 + } 704 + } 705 + } 706 + 707 + /* Update the NVN field of the 'right' field of the current GDP node 708 + * (being used by the HW) with the address of the updated ('free') top 709 + * field GDP node. 710 + * - In interlaced mode the 'right' field is the bottom field as we 711 + * update frames starting from their top field 712 + * - In progressive mode, we update both bottom and top fields which 713 + * are equal nodes. 714 + * At the next VSYNC, the updated node list will be used by the HW. 
715 + */ 716 + curr_list = sti_gdp_get_current_nodes(gdp); 717 + dma_updated_top = list->top_field_paddr; 718 + dma_updated_btm = list->btm_field_paddr; 719 + 720 + dev_dbg(gdp->dev, "Current NVN:0x%X\n", 721 + readl(gdp->regs + GAM_GDP_NVN_OFFSET)); 722 + dev_dbg(gdp->dev, "Posted buff: %lx current buff: %x\n", 723 + (unsigned long)cma_obj->paddr, 724 + readl(gdp->regs + GAM_GDP_PML_OFFSET)); 725 + 726 + if (!curr_list) { 727 + /* First update or invalid node should directly write in the 728 + * hw register */ 729 + DRM_DEBUG_DRIVER("%s first update (or invalid node)", 730 + sti_plane_to_str(plane)); 731 + 732 + writel(gdp->is_curr_top ? 733 + dma_updated_btm : dma_updated_top, 734 + gdp->regs + GAM_GDP_NVN_OFFSET); 735 + goto end; 736 + } 737 + 738 + if (mode->flags & DRM_MODE_FLAG_INTERLACE) { 739 + if (gdp->is_curr_top) { 740 + /* Do not update in the middle of the frame, but 741 + * postpone the update after the bottom field has 742 + * been displayed */ 743 + curr_list->btm_field->gam_gdp_nvn = dma_updated_top; 744 + } else { 745 + /* Direct update to avoid one frame delay */ 746 + writel(dma_updated_top, 747 + gdp->regs + GAM_GDP_NVN_OFFSET); 748 + } 749 + } else { 750 + /* Direct update for progressive to avoid one frame delay */ 751 + writel(dma_updated_top, gdp->regs + GAM_GDP_NVN_OFFSET); 752 + } 753 + 754 + end: 755 + plane->status = STI_PLANE_UPDATED; 756 + } 757 + 758 + static void sti_gdp_atomic_disable(struct drm_plane *drm_plane, 759 + struct drm_plane_state *oldstate) 760 + { 761 + struct sti_plane *plane = to_sti_plane(drm_plane); 762 + struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc); 763 + 764 + if (!drm_plane->crtc) { 765 + DRM_DEBUG_DRIVER("drm plane:%d not enabled\n", 766 + drm_plane->base.id); 767 + return; 768 + } 769 + 770 + DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n", 771 + drm_plane->crtc->base.id, sti_mixer_to_str(mixer), 772 + drm_plane->base.id, sti_plane_to_str(plane)); 773 + 774 + plane->status = STI_PLANE_DISABLING; 
775 + } 776 + 777 + static const struct drm_plane_helper_funcs sti_gdp_helpers_funcs = { 778 + .atomic_update = sti_gdp_atomic_update, 779 + .atomic_disable = sti_gdp_atomic_disable, 390 780 }; 391 781 392 - struct sti_layer *sti_gdp_create(struct device *dev, int id) 782 + struct drm_plane *sti_gdp_create(struct drm_device *drm_dev, 783 + struct device *dev, int desc, 784 + void __iomem *baseaddr, 785 + unsigned int possible_crtcs, 786 + enum drm_plane_type type) 393 787 { 394 788 struct sti_gdp *gdp; 789 + int res; 395 790 396 791 gdp = devm_kzalloc(dev, sizeof(*gdp), GFP_KERNEL); 397 792 if (!gdp) { ··· 616 577 return NULL; 617 578 } 618 579 619 - gdp->layer.ops = &gdp_ops; 580 + gdp->dev = dev; 581 + gdp->regs = baseaddr; 582 + gdp->plane.desc = desc; 583 + gdp->plane.status = STI_PLANE_DISABLED; 584 + 620 585 gdp->vtg_field_nb.notifier_call = sti_gdp_field_cb; 621 586 622 - return (struct sti_layer *)gdp; 587 + sti_gdp_init(gdp); 588 + 589 + res = drm_universal_plane_init(drm_dev, &gdp->plane.drm_plane, 590 + possible_crtcs, 591 + &sti_plane_helpers_funcs, 592 + gdp_supported_formats, 593 + ARRAY_SIZE(gdp_supported_formats), 594 + type); 595 + if (res) { 596 + DRM_ERROR("Failed to initialize universal plane\n"); 597 + goto err; 598 + } 599 + 600 + drm_plane_helper_add(&gdp->plane.drm_plane, &sti_gdp_helpers_funcs); 601 + 602 + sti_plane_init_property(&gdp->plane, type); 603 + 604 + return &gdp->plane.drm_plane; 605 + 606 + err: 607 + devm_kfree(dev, gdp); 608 + return NULL; 623 609 }
+5 -2
drivers/gpu/drm/sti/sti_gdp.h
··· 11 11 12 12 #include <linux/types.h> 13 13 14 - struct sti_layer *sti_gdp_create(struct device *dev, int id); 15 - 14 + struct drm_plane *sti_gdp_create(struct drm_device *drm_dev, 15 + struct device *dev, int desc, 16 + void __iomem *baseaddr, 17 + unsigned int possible_crtcs, 18 + enum drm_plane_type type); 16 19 #endif
+13 -14
drivers/gpu/drm/sti/sti_hdmi.c
··· 588 588 return count; 589 589 590 590 fail: 591 - DRM_ERROR("Can not read HDMI EDID\n"); 591 + DRM_ERROR("Can't read HDMI EDID\n"); 592 592 return 0; 593 593 } 594 594 ··· 693 693 struct sti_hdmi_connector *connector; 694 694 struct drm_connector *drm_connector; 695 695 struct drm_bridge *bridge; 696 - struct device_node *ddc; 697 696 int err; 698 - 699 - ddc = of_parse_phandle(dev->of_node, "ddc", 0); 700 - if (ddc) { 701 - hdmi->ddc_adapt = of_find_i2c_adapter_by_node(ddc); 702 - if (!hdmi->ddc_adapt) { 703 - err = -EPROBE_DEFER; 704 - of_node_put(ddc); 705 - return err; 706 - } 707 - 708 - of_node_put(ddc); 709 - } 710 697 711 698 /* Set the drm device handle */ 712 699 hdmi->drm_dev = drm_dev; ··· 783 796 struct sti_hdmi *hdmi; 784 797 struct device_node *np = dev->of_node; 785 798 struct resource *res; 799 + struct device_node *ddc; 786 800 int ret; 787 801 788 802 DRM_INFO("%s\n", __func__); ··· 791 803 hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL); 792 804 if (!hdmi) 793 805 return -ENOMEM; 806 + 807 + ddc = of_parse_phandle(pdev->dev.of_node, "ddc", 0); 808 + if (ddc) { 809 + hdmi->ddc_adapt = of_find_i2c_adapter_by_node(ddc); 810 + if (!hdmi->ddc_adapt) { 811 + of_node_put(ddc); 812 + return -EPROBE_DEFER; 813 + } 814 + 815 + of_node_put(ddc); 816 + } 794 817 795 818 hdmi->dev = pdev->dev; 796 819
+253 -229
drivers/gpu/drm/sti/sti_hqvdp.c
··· 12 12 #include <linux/reset.h> 13 13 14 14 #include <drm/drmP.h> 15 + #include <drm/drm_fb_cma_helper.h> 16 + #include <drm/drm_gem_cma_helper.h> 15 17 16 - #include "sti_drm_plane.h" 17 - #include "sti_hqvdp.h" 18 + #include "sti_compositor.h" 18 19 #include "sti_hqvdp_lut.h" 19 - #include "sti_layer.h" 20 + #include "sti_plane.h" 20 21 #include "sti_vtg.h" 21 22 22 23 /* Firmware name */ ··· 323 322 * @dev: driver device 324 323 * @drm_dev: the drm device 325 324 * @regs: registers 326 - * @layer: layer structure for hqvdp it self 327 - * @vid_plane: VID plug used as link with compositor IP 325 + * @plane: plane structure for hqvdp it self 328 326 * @clk: IP clock 329 327 * @clk_pix_main: pix main clock 330 328 * @reset: reset control ··· 334 334 * @hqvdp_cmd: buffer of commands 335 335 * @hqvdp_cmd_paddr: physical address of hqvdp_cmd 336 336 * @vtg: vtg for main data path 337 + * @xp70_initialized: true if xp70 is already initialized 337 338 */ 338 339 struct sti_hqvdp { 339 340 struct device *dev; 340 341 struct drm_device *drm_dev; 341 342 void __iomem *regs; 342 - struct sti_layer layer; 343 - struct drm_plane *vid_plane; 343 + struct sti_plane plane; 344 344 struct clk *clk; 345 345 struct clk *clk_pix_main; 346 346 struct reset_control *reset; ··· 351 351 void *hqvdp_cmd; 352 352 dma_addr_t hqvdp_cmd_paddr; 353 353 struct sti_vtg *vtg; 354 + bool xp70_initialized; 354 355 }; 355 356 356 - #define to_sti_hqvdp(x) container_of(x, struct sti_hqvdp, layer) 357 + #define to_sti_hqvdp(x) container_of(x, struct sti_hqvdp, plane) 357 358 358 359 static const uint32_t hqvdp_supported_formats[] = { 359 360 DRM_FORMAT_NV12, 360 361 }; 361 - 362 - static const uint32_t *sti_hqvdp_get_formats(struct sti_layer *layer) 363 - { 364 - return hqvdp_supported_formats; 365 - } 366 - 367 - static unsigned int sti_hqvdp_get_nb_formats(struct sti_layer *layer) 368 - { 369 - return ARRAY_SIZE(hqvdp_supported_formats); 370 - } 371 362 372 363 /** 373 364 * 
sti_hqvdp_get_free_cmd ··· 475 484 476 485 /** 477 486 * sti_hqvdp_check_hw_scaling 478 - * @layer: hqvdp layer 487 + * @hqvdp: hqvdp pointer 488 + * @mode: display mode with timing constraints 489 + * @src_w: source width 490 + * @src_h: source height 491 + * @dst_w: destination width 492 + * @dst_h: destination height 479 493 * 480 494 * Check if the HW is able to perform the scaling request 481 495 * The firmware scaling limitation is "CEIL(1/Zy) <= FLOOR(LFW)" where: ··· 494 498 * RETURNS: 495 499 * True if the HW can scale. 496 500 */ 497 - static bool sti_hqvdp_check_hw_scaling(struct sti_layer *layer) 501 + static bool sti_hqvdp_check_hw_scaling(struct sti_hqvdp *hqvdp, 502 + struct drm_display_mode *mode, 503 + int src_w, int src_h, 504 + int dst_w, int dst_h) 498 505 { 499 - struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer); 500 506 unsigned long lfw; 501 507 unsigned int inv_zy; 502 508 503 - lfw = layer->mode->htotal * (clk_get_rate(hqvdp->clk) / 1000000); 504 - lfw /= max(layer->src_w, layer->dst_w) * layer->mode->clock / 1000; 509 + lfw = mode->htotal * (clk_get_rate(hqvdp->clk) / 1000000); 510 + lfw /= max(src_w, dst_w) * mode->clock / 1000; 505 511 506 - inv_zy = DIV_ROUND_UP(layer->src_h, layer->dst_h); 512 + inv_zy = DIV_ROUND_UP(src_h, dst_h); 507 513 508 514 return (inv_zy <= lfw) ? true : false; 509 515 } 510 516 511 517 /** 512 - * sti_hqvdp_prepare_layer 513 - * @layer: hqvdp layer 514 - * @first_prepare: true if it is the first time this function is called 518 + * sti_hqvdp_disable 519 + * @hqvdp: hqvdp pointer 515 520 * 516 - * Prepares a command for the firmware 517 - * 518 - * RETURNS: 519 - * 0 on success. 
521 + * Disables the HQVDP plane 520 522 */ 521 - static int sti_hqvdp_prepare_layer(struct sti_layer *layer, bool first_prepare) 523 + static void sti_hqvdp_disable(struct sti_hqvdp *hqvdp) 522 524 { 523 - struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer); 524 - struct sti_hqvdp_cmd *cmd; 525 - int scale_h, scale_v; 526 - int cmd_offset; 527 - 528 - dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_layer_to_str(layer)); 529 - 530 - /* prepare and commit VID plane */ 531 - hqvdp->vid_plane->funcs->update_plane(hqvdp->vid_plane, 532 - layer->crtc, layer->fb, 533 - layer->dst_x, layer->dst_y, 534 - layer->dst_w, layer->dst_h, 535 - layer->src_x, layer->src_y, 536 - layer->src_w, layer->src_h); 537 - 538 - cmd_offset = sti_hqvdp_get_free_cmd(hqvdp); 539 - if (cmd_offset == -1) { 540 - DRM_ERROR("No available hqvdp_cmd now\n"); 541 - return -EBUSY; 542 - } 543 - cmd = hqvdp->hqvdp_cmd + cmd_offset; 544 - 545 - if (!sti_hqvdp_check_hw_scaling(layer)) { 546 - DRM_ERROR("Scaling beyond HW capabilities\n"); 547 - return -EINVAL; 548 - } 549 - 550 - /* Static parameters, defaulting to progressive mode */ 551 - cmd->top.config = TOP_CONFIG_PROGRESSIVE; 552 - cmd->top.mem_format = TOP_MEM_FORMAT_DFLT; 553 - cmd->hvsrc.param_ctrl = HVSRC_PARAM_CTRL_DFLT; 554 - cmd->csdi.config = CSDI_CONFIG_PROG; 555 - 556 - /* VC1RE, FMD bypassed : keep everything set to 0 557 - * IQI/P2I bypassed */ 558 - cmd->iqi.config = IQI_CONFIG_DFLT; 559 - cmd->iqi.con_bri = IQI_CON_BRI_DFLT; 560 - cmd->iqi.sat_gain = IQI_SAT_GAIN_DFLT; 561 - cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT; 562 - 563 - /* Buffer planes address */ 564 - cmd->top.current_luma = (u32) layer->paddr + layer->offsets[0]; 565 - cmd->top.current_chroma = (u32) layer->paddr + layer->offsets[1]; 566 - 567 - /* Pitches */ 568 - cmd->top.luma_processed_pitch = cmd->top.luma_src_pitch = 569 - layer->pitches[0]; 570 - cmd->top.chroma_processed_pitch = cmd->top.chroma_src_pitch = 571 - layer->pitches[1]; 572 - 573 - /* Input / output size 574 - * Align 
to upper even value */ 575 - layer->dst_w = ALIGN(layer->dst_w, 2); 576 - layer->dst_h = ALIGN(layer->dst_h, 2); 577 - 578 - if ((layer->src_w > MAX_WIDTH) || (layer->src_w < MIN_WIDTH) || 579 - (layer->src_h > MAX_HEIGHT) || (layer->src_h < MIN_HEIGHT) || 580 - (layer->dst_w > MAX_WIDTH) || (layer->dst_w < MIN_WIDTH) || 581 - (layer->dst_h > MAX_HEIGHT) || (layer->dst_h < MIN_HEIGHT)) { 582 - DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n", 583 - layer->src_w, layer->src_h, 584 - layer->dst_w, layer->dst_h); 585 - return -EINVAL; 586 - } 587 - cmd->top.input_viewport_size = cmd->top.input_frame_size = 588 - layer->src_h << 16 | layer->src_w; 589 - cmd->hvsrc.output_picture_size = layer->dst_h << 16 | layer->dst_w; 590 - cmd->top.input_viewport_ori = layer->src_y << 16 | layer->src_x; 591 - 592 - /* Handle interlaced */ 593 - if (layer->fb->flags & DRM_MODE_FB_INTERLACED) { 594 - /* Top field to display */ 595 - cmd->top.config = TOP_CONFIG_INTER_TOP; 596 - 597 - /* Update pitches and vert size */ 598 - cmd->top.input_frame_size = (layer->src_h / 2) << 16 | 599 - layer->src_w; 600 - cmd->top.luma_processed_pitch *= 2; 601 - cmd->top.luma_src_pitch *= 2; 602 - cmd->top.chroma_processed_pitch *= 2; 603 - cmd->top.chroma_src_pitch *= 2; 604 - 605 - /* Enable directional deinterlacing processing */ 606 - cmd->csdi.config = CSDI_CONFIG_INTER_DIR; 607 - cmd->csdi.config2 = CSDI_CONFIG2_DFLT; 608 - cmd->csdi.dcdi_config = CSDI_DCDI_CONFIG_DFLT; 609 - } 610 - 611 - /* Update hvsrc lut coef */ 612 - scale_h = SCALE_FACTOR * layer->dst_w / layer->src_w; 613 - sti_hqvdp_update_hvsrc(HVSRC_HORI, scale_h, &cmd->hvsrc); 614 - 615 - scale_v = SCALE_FACTOR * layer->dst_h / layer->src_h; 616 - sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc); 617 - 618 - if (first_prepare) { 619 - /* Prevent VTG shutdown */ 620 - if (clk_prepare_enable(hqvdp->clk_pix_main)) { 621 - DRM_ERROR("Failed to prepare/enable pix main clk\n"); 622 - return -ENXIO; 623 - } 624 - 625 - /* Register 
VTG Vsync callback to handle bottom fields */ 626 - if ((layer->fb->flags & DRM_MODE_FB_INTERLACED) && 627 - sti_vtg_register_client(hqvdp->vtg, 628 - &hqvdp->vtg_nb, layer->mixer_id)) { 629 - DRM_ERROR("Cannot register VTG notifier\n"); 630 - return -ENXIO; 631 - } 632 - } 633 - 634 - return 0; 635 - } 636 - 637 - static int sti_hqvdp_commit_layer(struct sti_layer *layer) 638 - { 639 - struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer); 640 - int cmd_offset; 641 - 642 - dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_layer_to_str(layer)); 643 - 644 - cmd_offset = sti_hqvdp_get_free_cmd(hqvdp); 645 - if (cmd_offset == -1) { 646 - DRM_ERROR("No available hqvdp_cmd now\n"); 647 - return -EBUSY; 648 - } 649 - 650 - writel(hqvdp->hqvdp_cmd_paddr + cmd_offset, 651 - hqvdp->regs + HQVDP_MBX_NEXT_CMD); 652 - 653 - hqvdp->curr_field_count++; 654 - 655 - /* Interlaced : get ready to display the bottom field at next Vsync */ 656 - if (layer->fb->flags & DRM_MODE_FB_INTERLACED) 657 - hqvdp->btm_field_pending = true; 658 - 659 - dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n", 660 - __func__, hqvdp->hqvdp_cmd_paddr + cmd_offset); 661 - 662 - return 0; 663 - } 664 - 665 - static int sti_hqvdp_disable_layer(struct sti_layer *layer) 666 - { 667 - struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer); 668 525 int i; 669 526 670 - DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer)); 527 + DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(&hqvdp->plane)); 671 528 672 529 /* Unregister VTG Vsync callback */ 673 - if ((layer->fb->flags & DRM_MODE_FB_INTERLACED) && 674 - sti_vtg_unregister_client(hqvdp->vtg, &hqvdp->vtg_nb)) 530 + if (sti_vtg_unregister_client(hqvdp->vtg, &hqvdp->vtg_nb)) 675 531 DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n"); 676 532 677 533 /* Set next cmd to NULL */ ··· 539 691 /* VTG can stop now */ 540 692 clk_disable_unprepare(hqvdp->clk_pix_main); 541 693 542 - if (i == POLL_MAX_ATTEMPT) { 694 + if (i == POLL_MAX_ATTEMPT) 543 695 DRM_ERROR("XP70 could not revert to 
idle\n"); 544 - return -ENXIO; 545 - } 546 696 547 - /* disable VID plane */ 548 - hqvdp->vid_plane->funcs->disable_plane(hqvdp->vid_plane); 549 - 550 - return 0; 697 + hqvdp->plane.status = STI_PLANE_DISABLED; 551 698 } 552 699 553 700 /** ··· 565 722 if ((evt != VTG_TOP_FIELD_EVENT) && (evt != VTG_BOTTOM_FIELD_EVENT)) { 566 723 DRM_DEBUG_DRIVER("Unknown event\n"); 567 724 return 0; 725 + } 726 + 727 + if (hqvdp->plane.status == STI_PLANE_FLUSHING) { 728 + /* disable need to be synchronize on vsync event */ 729 + DRM_DEBUG_DRIVER("Vsync event received => disable %s\n", 730 + sti_plane_to_str(&hqvdp->plane)); 731 + 732 + sti_hqvdp_disable(hqvdp); 568 733 } 569 734 570 735 if (hqvdp->btm_field_pending) { ··· 609 758 return 0; 610 759 } 611 760 612 - static struct drm_plane *sti_hqvdp_find_vid(struct drm_device *dev, int id) 761 + static void sti_hqvdp_init(struct sti_hqvdp *hqvdp) 613 762 { 614 - struct drm_plane *plane; 615 - 616 - list_for_each_entry(plane, &dev->mode_config.plane_list, head) { 617 - struct sti_layer *layer = to_sti_layer(plane); 618 - 619 - if (layer->desc == id) 620 - return plane; 621 - } 622 - 623 - return NULL; 624 - } 625 - 626 - static void sti_hqvd_init(struct sti_layer *layer) 627 - { 628 - struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer); 629 763 int size; 630 - 631 - /* find the plane macthing with vid 0 */ 632 - hqvdp->vid_plane = sti_hqvdp_find_vid(hqvdp->drm_dev, STI_VID_0); 633 - if (!hqvdp->vid_plane) { 634 - DRM_ERROR("Cannot find Main video layer\n"); 635 - return; 636 - } 637 764 638 765 hqvdp->vtg_nb.notifier_call = sti_hqvdp_vtg_cb; 639 766 ··· 628 799 memset(hqvdp->hqvdp_cmd, 0, size); 629 800 } 630 801 631 - static const struct sti_layer_funcs hqvdp_ops = { 632 - .get_formats = sti_hqvdp_get_formats, 633 - .get_nb_formats = sti_hqvdp_get_nb_formats, 634 - .init = sti_hqvd_init, 635 - .prepare = sti_hqvdp_prepare_layer, 636 - .commit = sti_hqvdp_commit_layer, 637 - .disable = sti_hqvdp_disable_layer, 802 + static void 
sti_hqvdp_atomic_update(struct drm_plane *drm_plane, 803 + struct drm_plane_state *oldstate) 804 + { 805 + struct drm_plane_state *state = drm_plane->state; 806 + struct sti_plane *plane = to_sti_plane(drm_plane); 807 + struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane); 808 + struct drm_crtc *crtc = state->crtc; 809 + struct sti_mixer *mixer = to_sti_mixer(crtc); 810 + struct drm_framebuffer *fb = state->fb; 811 + struct drm_display_mode *mode = &crtc->mode; 812 + int dst_x = state->crtc_x; 813 + int dst_y = state->crtc_y; 814 + int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x); 815 + int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y); 816 + /* src_x are in 16.16 format */ 817 + int src_x = state->src_x >> 16; 818 + int src_y = state->src_y >> 16; 819 + int src_w = state->src_w >> 16; 820 + int src_h = state->src_h >> 16; 821 + bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false; 822 + struct drm_gem_cma_object *cma_obj; 823 + struct sti_hqvdp_cmd *cmd; 824 + int scale_h, scale_v; 825 + int cmd_offset; 826 + 827 + DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n", 828 + crtc->base.id, sti_mixer_to_str(mixer), 829 + drm_plane->base.id, sti_plane_to_str(plane)); 830 + DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n", 831 + sti_plane_to_str(plane), 832 + dst_w, dst_h, dst_x, dst_y, 833 + src_w, src_h, src_x, src_y); 834 + 835 + cmd_offset = sti_hqvdp_get_free_cmd(hqvdp); 836 + if (cmd_offset == -1) { 837 + DRM_ERROR("No available hqvdp_cmd now\n"); 838 + return; 839 + } 840 + cmd = hqvdp->hqvdp_cmd + cmd_offset; 841 + 842 + if (!sti_hqvdp_check_hw_scaling(hqvdp, mode, 843 + src_w, src_h, 844 + dst_w, dst_h)) { 845 + DRM_ERROR("Scaling beyond HW capabilities\n"); 846 + return; 847 + } 848 + 849 + /* Static parameters, defaulting to progressive mode */ 850 + cmd->top.config = TOP_CONFIG_PROGRESSIVE; 851 + cmd->top.mem_format = TOP_MEM_FORMAT_DFLT; 852 + cmd->hvsrc.param_ctrl = HVSRC_PARAM_CTRL_DFLT; 853 + 
cmd->csdi.config = CSDI_CONFIG_PROG; 854 + 855 + /* VC1RE, FMD bypassed : keep everything set to 0 856 + * IQI/P2I bypassed */ 857 + cmd->iqi.config = IQI_CONFIG_DFLT; 858 + cmd->iqi.con_bri = IQI_CON_BRI_DFLT; 859 + cmd->iqi.sat_gain = IQI_SAT_GAIN_DFLT; 860 + cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT; 861 + 862 + cma_obj = drm_fb_cma_get_gem_obj(fb, 0); 863 + if (!cma_obj) { 864 + DRM_ERROR("Can't get CMA GEM object for fb\n"); 865 + return; 866 + } 867 + 868 + DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id, 869 + (char *)&fb->pixel_format, 870 + (unsigned long)cma_obj->paddr); 871 + 872 + /* Buffer planes address */ 873 + cmd->top.current_luma = (u32)cma_obj->paddr + fb->offsets[0]; 874 + cmd->top.current_chroma = (u32)cma_obj->paddr + fb->offsets[1]; 875 + 876 + /* Pitches */ 877 + cmd->top.luma_processed_pitch = fb->pitches[0]; 878 + cmd->top.luma_src_pitch = fb->pitches[0]; 879 + cmd->top.chroma_processed_pitch = fb->pitches[1]; 880 + cmd->top.chroma_src_pitch = fb->pitches[1]; 881 + 882 + /* Input / output size 883 + * Align to upper even value */ 884 + dst_w = ALIGN(dst_w, 2); 885 + dst_h = ALIGN(dst_h, 2); 886 + 887 + if ((src_w > MAX_WIDTH) || (src_w < MIN_WIDTH) || 888 + (src_h > MAX_HEIGHT) || (src_h < MIN_HEIGHT) || 889 + (dst_w > MAX_WIDTH) || (dst_w < MIN_WIDTH) || 890 + (dst_h > MAX_HEIGHT) || (dst_h < MIN_HEIGHT)) { 891 + DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n", 892 + src_w, src_h, 893 + dst_w, dst_h); 894 + return; 895 + } 896 + 897 + cmd->top.input_viewport_size = src_h << 16 | src_w; 898 + cmd->top.input_frame_size = src_h << 16 | src_w; 899 + cmd->hvsrc.output_picture_size = dst_h << 16 | dst_w; 900 + cmd->top.input_viewport_ori = src_y << 16 | src_x; 901 + 902 + /* Handle interlaced */ 903 + if (fb->flags & DRM_MODE_FB_INTERLACED) { 904 + /* Top field to display */ 905 + cmd->top.config = TOP_CONFIG_INTER_TOP; 906 + 907 + /* Update pitches and vert size */ 908 + cmd->top.input_frame_size = (src_h / 2) << 16 | src_w; 
909 + cmd->top.luma_processed_pitch *= 2; 910 + cmd->top.luma_src_pitch *= 2; 911 + cmd->top.chroma_processed_pitch *= 2; 912 + cmd->top.chroma_src_pitch *= 2; 913 + 914 + /* Enable directional deinterlacing processing */ 915 + cmd->csdi.config = CSDI_CONFIG_INTER_DIR; 916 + cmd->csdi.config2 = CSDI_CONFIG2_DFLT; 917 + cmd->csdi.dcdi_config = CSDI_DCDI_CONFIG_DFLT; 918 + } 919 + 920 + /* Update hvsrc lut coef */ 921 + scale_h = SCALE_FACTOR * dst_w / src_w; 922 + sti_hqvdp_update_hvsrc(HVSRC_HORI, scale_h, &cmd->hvsrc); 923 + 924 + scale_v = SCALE_FACTOR * dst_h / src_h; 925 + sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc); 926 + 927 + if (first_prepare) { 928 + /* Prevent VTG shutdown */ 929 + if (clk_prepare_enable(hqvdp->clk_pix_main)) { 930 + DRM_ERROR("Failed to prepare/enable pix main clk\n"); 931 + return; 932 + } 933 + 934 + /* Register VTG Vsync callback to handle bottom fields */ 935 + if (sti_vtg_register_client(hqvdp->vtg, 936 + &hqvdp->vtg_nb, 937 + mixer->id)) { 938 + DRM_ERROR("Cannot register VTG notifier\n"); 939 + return; 940 + } 941 + } 942 + 943 + writel(hqvdp->hqvdp_cmd_paddr + cmd_offset, 944 + hqvdp->regs + HQVDP_MBX_NEXT_CMD); 945 + 946 + hqvdp->curr_field_count++; 947 + 948 + /* Interlaced : get ready to display the bottom field at next Vsync */ 949 + if (fb->flags & DRM_MODE_FB_INTERLACED) 950 + hqvdp->btm_field_pending = true; 951 + 952 + dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n", 953 + __func__, hqvdp->hqvdp_cmd_paddr + cmd_offset); 954 + 955 + plane->status = STI_PLANE_UPDATED; 956 + } 957 + 958 + static void sti_hqvdp_atomic_disable(struct drm_plane *drm_plane, 959 + struct drm_plane_state *oldstate) 960 + { 961 + struct sti_plane *plane = to_sti_plane(drm_plane); 962 + struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc); 963 + 964 + if (!drm_plane->crtc) { 965 + DRM_DEBUG_DRIVER("drm plane:%d not enabled\n", 966 + drm_plane->base.id); 967 + return; 968 + } 969 + 970 + DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d 
(%s)\n", 971 + drm_plane->crtc->base.id, sti_mixer_to_str(mixer), 972 + drm_plane->base.id, sti_plane_to_str(plane)); 973 + 974 + plane->status = STI_PLANE_DISABLING; 975 + } 976 + 977 + static const struct drm_plane_helper_funcs sti_hqvdp_helpers_funcs = { 978 + .atomic_update = sti_hqvdp_atomic_update, 979 + .atomic_disable = sti_hqvdp_atomic_disable, 638 980 }; 639 981 640 - struct sti_layer *sti_hqvdp_create(struct device *dev) 982 + static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev, 983 + struct device *dev, int desc) 641 984 { 642 985 struct sti_hqvdp *hqvdp = dev_get_drvdata(dev); 986 + int res; 643 987 644 - hqvdp->layer.ops = &hqvdp_ops; 988 + hqvdp->plane.desc = desc; 989 + hqvdp->plane.status = STI_PLANE_DISABLED; 645 990 646 - return &hqvdp->layer; 991 + sti_hqvdp_init(hqvdp); 992 + 993 + res = drm_universal_plane_init(drm_dev, &hqvdp->plane.drm_plane, 1, 994 + &sti_plane_helpers_funcs, 995 + hqvdp_supported_formats, 996 + ARRAY_SIZE(hqvdp_supported_formats), 997 + DRM_PLANE_TYPE_OVERLAY); 998 + if (res) { 999 + DRM_ERROR("Failed to initialize universal plane\n"); 1000 + return NULL; 1001 + } 1002 + 1003 + drm_plane_helper_add(&hqvdp->plane.drm_plane, &sti_hqvdp_helpers_funcs); 1004 + 1005 + sti_plane_init_property(&hqvdp->plane, DRM_PLANE_TYPE_OVERLAY); 1006 + 1007 + return &hqvdp->plane.drm_plane; 647 1008 } 648 - EXPORT_SYMBOL(sti_hqvdp_create); 649 1009 650 1010 static void sti_hqvdp_init_plugs(struct sti_hqvdp *hqvdp) 651 1011 { ··· 877 859 } *header; 878 860 879 861 DRM_DEBUG_DRIVER("\n"); 862 + 863 + if (hqvdp->xp70_initialized) { 864 + DRM_INFO("HQVDP XP70 already initialized\n"); 865 + return; 866 + } 867 + 880 868 /* Check firmware parts */ 881 869 if (!firmware) { 882 870 DRM_ERROR("Firmware not available\n"); ··· 970 946 /* Launch Vsync */ 971 947 writel(SOFT_VSYNC_HW, hqvdp->regs + HQVDP_MBX_SOFT_VSYNC); 972 948 973 - DRM_INFO("HQVDP XP70 started\n"); 949 + DRM_INFO("HQVDP XP70 initialized\n"); 950 + 951 + 
hqvdp->xp70_initialized = true; 952 + 974 953 out: 975 954 release_firmware(firmware); 976 955 } ··· 982 955 { 983 956 struct sti_hqvdp *hqvdp = dev_get_drvdata(dev); 984 957 struct drm_device *drm_dev = data; 985 - struct sti_layer *layer; 958 + struct drm_plane *plane; 986 959 int err; 987 960 988 961 DRM_DEBUG_DRIVER("\n"); ··· 998 971 return err; 999 972 } 1000 973 1001 - layer = sti_layer_create(hqvdp->dev, STI_HQVDP_0, hqvdp->regs); 1002 - if (!layer) { 974 + /* Create HQVDP plane once xp70 is initialized */ 975 + plane = sti_hqvdp_create(drm_dev, hqvdp->dev, STI_HQVDP_0); 976 + if (!plane) 1003 977 DRM_ERROR("Can't create HQVDP plane\n"); 1004 - return -ENOMEM; 1005 - } 1006 - 1007 - sti_drm_plane_init(drm_dev, layer, 1, DRM_PLANE_TYPE_OVERLAY); 1008 978 1009 979 return 0; 1010 980 }
-12
drivers/gpu/drm/sti/sti_hqvdp.h
··· 1 - /* 2 - * Copyright (C) STMicroelectronics SA 2014 3 - * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics. 4 - * License terms: GNU General Public License (GPL), version 2 5 - */ 6 - 7 - #ifndef _STI_HQVDP_H_ 8 - #define _STI_HQVDP_H_ 9 - 10 - struct sti_layer *sti_hqvdp_create(struct device *dev); 11 - 12 - #endif
-213
drivers/gpu/drm/sti/sti_layer.c
··· 1 - /* 2 - * Copyright (C) STMicroelectronics SA 2014 3 - * Authors: Benjamin Gaignard <benjamin.gaignard@st.com> 4 - * Fabien Dessenne <fabien.dessenne@st.com> 5 - * for STMicroelectronics. 6 - * License terms: GNU General Public License (GPL), version 2 7 - */ 8 - 9 - #include <drm/drmP.h> 10 - #include <drm/drm_gem_cma_helper.h> 11 - #include <drm/drm_fb_cma_helper.h> 12 - 13 - #include "sti_compositor.h" 14 - #include "sti_cursor.h" 15 - #include "sti_gdp.h" 16 - #include "sti_hqvdp.h" 17 - #include "sti_layer.h" 18 - #include "sti_vid.h" 19 - 20 - const char *sti_layer_to_str(struct sti_layer *layer) 21 - { 22 - switch (layer->desc) { 23 - case STI_GDP_0: 24 - return "GDP0"; 25 - case STI_GDP_1: 26 - return "GDP1"; 27 - case STI_GDP_2: 28 - return "GDP2"; 29 - case STI_GDP_3: 30 - return "GDP3"; 31 - case STI_VID_0: 32 - return "VID0"; 33 - case STI_VID_1: 34 - return "VID1"; 35 - case STI_CURSOR: 36 - return "CURSOR"; 37 - case STI_HQVDP_0: 38 - return "HQVDP0"; 39 - default: 40 - return "<UNKNOWN LAYER>"; 41 - } 42 - } 43 - EXPORT_SYMBOL(sti_layer_to_str); 44 - 45 - struct sti_layer *sti_layer_create(struct device *dev, int desc, 46 - void __iomem *baseaddr) 47 - { 48 - 49 - struct sti_layer *layer = NULL; 50 - 51 - switch (desc & STI_LAYER_TYPE_MASK) { 52 - case STI_GDP: 53 - layer = sti_gdp_create(dev, desc); 54 - break; 55 - case STI_VID: 56 - layer = sti_vid_create(dev); 57 - break; 58 - case STI_CUR: 59 - layer = sti_cursor_create(dev); 60 - break; 61 - case STI_VDP: 62 - layer = sti_hqvdp_create(dev); 63 - break; 64 - } 65 - 66 - if (!layer) { 67 - DRM_ERROR("Failed to create layer\n"); 68 - return NULL; 69 - } 70 - 71 - layer->desc = desc; 72 - layer->dev = dev; 73 - layer->regs = baseaddr; 74 - 75 - layer->ops->init(layer); 76 - 77 - DRM_DEBUG_DRIVER("%s created\n", sti_layer_to_str(layer)); 78 - 79 - return layer; 80 - } 81 - EXPORT_SYMBOL(sti_layer_create); 82 - 83 - int sti_layer_prepare(struct sti_layer *layer, 84 - struct drm_crtc *crtc, 85 
- struct drm_framebuffer *fb, 86 - struct drm_display_mode *mode, int mixer_id, 87 - int dest_x, int dest_y, int dest_w, int dest_h, 88 - int src_x, int src_y, int src_w, int src_h) 89 - { 90 - int ret; 91 - unsigned int i; 92 - struct drm_gem_cma_object *cma_obj; 93 - 94 - if (!layer || !fb || !mode) { 95 - DRM_ERROR("Null fb, layer or mode\n"); 96 - return 1; 97 - } 98 - 99 - cma_obj = drm_fb_cma_get_gem_obj(fb, 0); 100 - if (!cma_obj) { 101 - DRM_ERROR("Can't get CMA GEM object for fb\n"); 102 - return 1; 103 - } 104 - 105 - layer->crtc = crtc; 106 - layer->fb = fb; 107 - layer->mode = mode; 108 - layer->mixer_id = mixer_id; 109 - layer->dst_x = dest_x; 110 - layer->dst_y = dest_y; 111 - layer->dst_w = clamp_val(dest_w, 0, mode->crtc_hdisplay - dest_x); 112 - layer->dst_h = clamp_val(dest_h, 0, mode->crtc_vdisplay - dest_y); 113 - layer->src_x = src_x; 114 - layer->src_y = src_y; 115 - layer->src_w = src_w; 116 - layer->src_h = src_h; 117 - layer->format = fb->pixel_format; 118 - layer->vaddr = cma_obj->vaddr; 119 - layer->paddr = cma_obj->paddr; 120 - for (i = 0; i < 4; i++) { 121 - layer->pitches[i] = fb->pitches[i]; 122 - layer->offsets[i] = fb->offsets[i]; 123 - } 124 - 125 - DRM_DEBUG_DRIVER("%s is associated with mixer_id %d\n", 126 - sti_layer_to_str(layer), 127 - layer->mixer_id); 128 - DRM_DEBUG_DRIVER("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n", 129 - sti_layer_to_str(layer), 130 - layer->dst_w, layer->dst_h, layer->dst_x, layer->dst_y, 131 - layer->src_w, layer->src_h, layer->src_x, 132 - layer->src_y); 133 - 134 - DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id, 135 - (char *)&layer->format, (unsigned long)layer->paddr); 136 - 137 - if (!layer->ops->prepare) 138 - goto err_no_prepare; 139 - 140 - ret = layer->ops->prepare(layer, !layer->enabled); 141 - if (!ret) 142 - layer->enabled = true; 143 - 144 - return ret; 145 - 146 - err_no_prepare: 147 - DRM_ERROR("Cannot prepare\n"); 148 - return 1; 149 - } 150 - 151 - int 
sti_layer_commit(struct sti_layer *layer) 152 - { 153 - if (!layer) 154 - return 1; 155 - 156 - if (!layer->ops->commit) 157 - goto err_no_commit; 158 - 159 - return layer->ops->commit(layer); 160 - 161 - err_no_commit: 162 - DRM_ERROR("Cannot commit\n"); 163 - return 1; 164 - } 165 - 166 - int sti_layer_disable(struct sti_layer *layer) 167 - { 168 - int ret; 169 - 170 - DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer)); 171 - if (!layer) 172 - return 1; 173 - 174 - if (!layer->enabled) 175 - return 0; 176 - 177 - if (!layer->ops->disable) 178 - goto err_no_disable; 179 - 180 - ret = layer->ops->disable(layer); 181 - if (!ret) 182 - layer->enabled = false; 183 - else 184 - DRM_ERROR("Disable failed\n"); 185 - 186 - return ret; 187 - 188 - err_no_disable: 189 - DRM_ERROR("Cannot disable\n"); 190 - return 1; 191 - } 192 - 193 - const uint32_t *sti_layer_get_formats(struct sti_layer *layer) 194 - { 195 - if (!layer) 196 - return NULL; 197 - 198 - if (!layer->ops->get_formats) 199 - return NULL; 200 - 201 - return layer->ops->get_formats(layer); 202 - } 203 - 204 - unsigned int sti_layer_get_nb_formats(struct sti_layer *layer) 205 - { 206 - if (!layer) 207 - return 0; 208 - 209 - if (!layer->ops->get_nb_formats) 210 - return 0; 211 - 212 - return layer->ops->get_nb_formats(layer); 213 - }
-131
drivers/gpu/drm/sti/sti_layer.h
··· 1 - /* 2 - * Copyright (C) STMicroelectronics SA 2014 3 - * Authors: Benjamin Gaignard <benjamin.gaignard@st.com> 4 - * Fabien Dessenne <fabien.dessenne@st.com> 5 - * for STMicroelectronics. 6 - * License terms: GNU General Public License (GPL), version 2 7 - */ 8 - 9 - #ifndef _STI_LAYER_H_ 10 - #define _STI_LAYER_H_ 11 - 12 - #include <drm/drmP.h> 13 - 14 - #define to_sti_layer(x) container_of(x, struct sti_layer, plane) 15 - 16 - #define STI_LAYER_TYPE_SHIFT 8 17 - #define STI_LAYER_TYPE_MASK (~((1<<STI_LAYER_TYPE_SHIFT)-1)) 18 - 19 - struct sti_layer; 20 - 21 - enum sti_layer_type { 22 - STI_GDP = 1 << STI_LAYER_TYPE_SHIFT, 23 - STI_VID = 2 << STI_LAYER_TYPE_SHIFT, 24 - STI_CUR = 3 << STI_LAYER_TYPE_SHIFT, 25 - STI_BCK = 4 << STI_LAYER_TYPE_SHIFT, 26 - STI_VDP = 5 << STI_LAYER_TYPE_SHIFT 27 - }; 28 - 29 - enum sti_layer_id_of_type { 30 - STI_ID_0 = 0, 31 - STI_ID_1 = 1, 32 - STI_ID_2 = 2, 33 - STI_ID_3 = 3 34 - }; 35 - 36 - enum sti_layer_desc { 37 - STI_GDP_0 = STI_GDP | STI_ID_0, 38 - STI_GDP_1 = STI_GDP | STI_ID_1, 39 - STI_GDP_2 = STI_GDP | STI_ID_2, 40 - STI_GDP_3 = STI_GDP | STI_ID_3, 41 - STI_VID_0 = STI_VID | STI_ID_0, 42 - STI_VID_1 = STI_VID | STI_ID_1, 43 - STI_HQVDP_0 = STI_VDP | STI_ID_0, 44 - STI_CURSOR = STI_CUR, 45 - STI_BACK = STI_BCK 46 - }; 47 - 48 - /** 49 - * STI layer functions structure 50 - * 51 - * @get_formats: get layer supported formats 52 - * @get_nb_formats: get number of format supported 53 - * @init: initialize the layer 54 - * @prepare: prepare layer before rendering 55 - * @commit: set layer for rendering 56 - * @disable: disable layer 57 - */ 58 - struct sti_layer_funcs { 59 - const uint32_t* (*get_formats)(struct sti_layer *layer); 60 - unsigned int (*get_nb_formats)(struct sti_layer *layer); 61 - void (*init)(struct sti_layer *layer); 62 - int (*prepare)(struct sti_layer *layer, bool first_prepare); 63 - int (*commit)(struct sti_layer *layer); 64 - int (*disable)(struct sti_layer *layer); 65 - }; 66 - 67 - /** 68 - * STI 
layer structure 69 - * 70 - * @plane: drm plane it is bound to (if any) 71 - * @fb: drm fb it is bound to 72 - * @crtc: crtc it is bound to 73 - * @mode: display mode 74 - * @desc: layer type & id 75 - * @device: driver device 76 - * @regs: layer registers 77 - * @ops: layer functions 78 - * @zorder: layer z-order 79 - * @mixer_id: id of the mixer used to display the layer 80 - * @enabled: to know if the layer is active or not 81 - * @src_x src_y: coordinates of the input (fb) area 82 - * @src_w src_h: size of the input (fb) area 83 - * @dst_x dst_y: coordinates of the output (crtc) area 84 - * @dst_w dst_h: size of the output (crtc) area 85 - * @format: format 86 - * @pitches: pitch of 'planes' (eg: Y, U, V) 87 - * @offsets: offset of 'planes' 88 - * @vaddr: virtual address of the input buffer 89 - * @paddr: physical address of the input buffer 90 - */ 91 - struct sti_layer { 92 - struct drm_plane plane; 93 - struct drm_framebuffer *fb; 94 - struct drm_crtc *crtc; 95 - struct drm_display_mode *mode; 96 - enum sti_layer_desc desc; 97 - struct device *dev; 98 - void __iomem *regs; 99 - const struct sti_layer_funcs *ops; 100 - int zorder; 101 - int mixer_id; 102 - bool enabled; 103 - int src_x, src_y; 104 - int src_w, src_h; 105 - int dst_x, dst_y; 106 - int dst_w, dst_h; 107 - uint32_t format; 108 - unsigned int pitches[4]; 109 - unsigned int offsets[4]; 110 - void *vaddr; 111 - dma_addr_t paddr; 112 - }; 113 - 114 - struct sti_layer *sti_layer_create(struct device *dev, int desc, 115 - void __iomem *baseaddr); 116 - int sti_layer_prepare(struct sti_layer *layer, 117 - struct drm_crtc *crtc, 118 - struct drm_framebuffer *fb, 119 - struct drm_display_mode *mode, 120 - int mixer_id, 121 - int dest_x, int dest_y, 122 - int dest_w, int dest_h, 123 - int src_x, int src_y, 124 - int src_w, int src_h); 125 - int sti_layer_commit(struct sti_layer *layer); 126 - int sti_layer_disable(struct sti_layer *layer); 127 - const uint32_t *sti_layer_get_formats(struct sti_layer 
*layer); 128 - unsigned int sti_layer_get_nb_formats(struct sti_layer *layer); 129 - const char *sti_layer_to_str(struct sti_layer *layer); 130 - 131 - #endif
+33 -39
drivers/gpu/drm/sti/sti_mixer.c
··· 58 58 return "<UNKNOWN MIXER>"; 59 59 } 60 60 } 61 + EXPORT_SYMBOL(sti_mixer_to_str); 61 62 62 63 static inline u32 sti_mixer_reg_read(struct sti_mixer *mixer, u32 reg_id) 63 64 { ··· 102 101 sti_mixer_reg_write(mixer, GAM_MIXER_BCS, yds << 16 | xds); 103 102 } 104 103 105 - int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer) 104 + int sti_mixer_set_plane_depth(struct sti_mixer *mixer, struct sti_plane *plane) 106 105 { 107 - int layer_id = 0, depth = layer->zorder; 106 + int plane_id, depth = plane->zorder; 107 + unsigned int i; 108 108 u32 mask, val; 109 109 110 - if (depth >= GAM_MIXER_NB_DEPTH_LEVEL) 110 + if ((depth < 1) || (depth > GAM_MIXER_NB_DEPTH_LEVEL)) 111 111 return 1; 112 112 113 - switch (layer->desc) { 113 + switch (plane->desc) { 114 114 case STI_GDP_0: 115 - layer_id = GAM_DEPTH_GDP0_ID; 115 + plane_id = GAM_DEPTH_GDP0_ID; 116 116 break; 117 117 case STI_GDP_1: 118 - layer_id = GAM_DEPTH_GDP1_ID; 118 + plane_id = GAM_DEPTH_GDP1_ID; 119 119 break; 120 120 case STI_GDP_2: 121 - layer_id = GAM_DEPTH_GDP2_ID; 121 + plane_id = GAM_DEPTH_GDP2_ID; 122 122 break; 123 123 case STI_GDP_3: 124 - layer_id = GAM_DEPTH_GDP3_ID; 124 + plane_id = GAM_DEPTH_GDP3_ID; 125 125 break; 126 - case STI_VID_0: 127 126 case STI_HQVDP_0: 128 - layer_id = GAM_DEPTH_VID0_ID; 129 - break; 130 - case STI_VID_1: 131 - layer_id = GAM_DEPTH_VID1_ID; 127 + plane_id = GAM_DEPTH_VID0_ID; 132 128 break; 133 129 case STI_CURSOR: 134 130 /* no need to set depth for cursor */ 135 131 return 0; 136 132 default: 137 - DRM_ERROR("Unknown layer %d\n", layer->desc); 133 + DRM_ERROR("Unknown plane %d\n", plane->desc); 138 134 return 1; 139 135 } 140 - mask = GAM_DEPTH_MASK_ID << (3 * depth); 141 - layer_id = layer_id << (3 * depth); 136 + 137 + /* Search if a previous depth was already assigned to the plane */ 138 + val = sti_mixer_reg_read(mixer, GAM_MIXER_CRB); 139 + for (i = 0; i < GAM_MIXER_NB_DEPTH_LEVEL; i++) { 140 + mask = GAM_DEPTH_MASK_ID << (3 * i); 
141 + if ((val & mask) == plane_id << (3 * i)) 142 + break; 143 + } 144 + 145 + mask |= GAM_DEPTH_MASK_ID << (3 * (depth - 1)); 146 + plane_id = plane_id << (3 * (depth - 1)); 142 147 143 148 DRM_DEBUG_DRIVER("%s %s depth=%d\n", sti_mixer_to_str(mixer), 144 - sti_layer_to_str(layer), depth); 149 + sti_plane_to_str(plane), depth); 145 150 dev_dbg(mixer->dev, "GAM_MIXER_CRB val 0x%x mask 0x%x\n", 146 - layer_id, mask); 151 + plane_id, mask); 147 152 148 - val = sti_mixer_reg_read(mixer, GAM_MIXER_CRB); 149 153 val &= ~mask; 150 - val |= layer_id; 154 + val |= plane_id; 151 155 sti_mixer_reg_write(mixer, GAM_MIXER_CRB, val); 152 156 153 157 dev_dbg(mixer->dev, "Read GAM_MIXER_CRB 0x%x\n", ··· 182 176 return 0; 183 177 } 184 178 185 - static u32 sti_mixer_get_layer_mask(struct sti_layer *layer) 179 + static u32 sti_mixer_get_plane_mask(struct sti_plane *plane) 186 180 { 187 - switch (layer->desc) { 181 + switch (plane->desc) { 188 182 case STI_BACK: 189 183 return GAM_CTL_BACK_MASK; 190 184 case STI_GDP_0: ··· 195 189 return GAM_CTL_GDP2_MASK; 196 190 case STI_GDP_3: 197 191 return GAM_CTL_GDP3_MASK; 198 - case STI_VID_0: 199 192 case STI_HQVDP_0: 200 193 return GAM_CTL_VID0_MASK; 201 - case STI_VID_1: 202 - return GAM_CTL_VID1_MASK; 203 194 case STI_CURSOR: 204 195 return GAM_CTL_CURSOR_MASK; 205 196 default: ··· 204 201 } 205 202 } 206 203 207 - int sti_mixer_set_layer_status(struct sti_mixer *mixer, 208 - struct sti_layer *layer, bool status) 204 + int sti_mixer_set_plane_status(struct sti_mixer *mixer, 205 + struct sti_plane *plane, bool status) 209 206 { 210 207 u32 mask, val; 211 208 212 209 DRM_DEBUG_DRIVER("%s %s %s\n", status ? 
"enable" : "disable", 213 - sti_mixer_to_str(mixer), sti_layer_to_str(layer)); 210 + sti_mixer_to_str(mixer), sti_plane_to_str(plane)); 214 211 215 - mask = sti_mixer_get_layer_mask(layer); 212 + mask = sti_mixer_get_plane_mask(plane); 216 213 if (!mask) { 217 - DRM_ERROR("Can not find layer mask\n"); 214 + DRM_ERROR("Can't find layer mask\n"); 218 215 return -EINVAL; 219 216 } 220 217 ··· 224 221 sti_mixer_reg_write(mixer, GAM_MIXER_CTL, val); 225 222 226 223 return 0; 227 - } 228 - 229 - void sti_mixer_clear_all_layers(struct sti_mixer *mixer) 230 - { 231 - u32 val; 232 - 233 - DRM_DEBUG_DRIVER("%s clear all layer\n", sti_mixer_to_str(mixer)); 234 - val = sti_mixer_reg_read(mixer, GAM_MIXER_CTL) & 0xFFFF0000; 235 - sti_mixer_reg_write(mixer, GAM_MIXER_CTL, val); 236 224 } 237 225 238 226 void sti_mixer_set_matrix(struct sti_mixer *mixer)
+16 -11
drivers/gpu/drm/sti/sti_mixer.h
··· 11 11 12 12 #include <drm/drmP.h> 13 13 14 - #include "sti_layer.h" 14 + #include "sti_plane.h" 15 15 16 16 #define to_sti_mixer(x) container_of(x, struct sti_mixer, drm_crtc) 17 + 18 + enum sti_mixer_status { 19 + STI_MIXER_READY, 20 + STI_MIXER_DISABLING, 21 + STI_MIXER_DISABLED, 22 + }; 17 23 18 24 /** 19 25 * STI Mixer subdevice structure ··· 29 23 * @id: id of the mixer 30 24 * @drm_crtc: crtc object link to the mixer 31 25 * @pending_event: set if a flip event is pending on crtc 32 - * @enabled: to know if the mixer is active or not 26 + * @status: to know the status of the mixer 33 27 */ 34 28 struct sti_mixer { 35 29 struct device *dev; 36 30 void __iomem *regs; 37 31 int id; 38 - struct drm_crtc drm_crtc; 32 + struct drm_crtc drm_crtc; 39 33 struct drm_pending_vblank_event *pending_event; 40 - bool enabled; 34 + enum sti_mixer_status status; 41 35 }; 42 36 43 37 const char *sti_mixer_to_str(struct sti_mixer *mixer); 44 38 45 39 struct sti_mixer *sti_mixer_create(struct device *dev, int id, 46 - void __iomem *baseaddr); 40 + void __iomem *baseaddr); 47 41 48 - int sti_mixer_set_layer_status(struct sti_mixer *mixer, 49 - struct sti_layer *layer, bool status); 50 - void sti_mixer_clear_all_layers(struct sti_mixer *mixer); 51 - int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer); 42 + int sti_mixer_set_plane_status(struct sti_mixer *mixer, 43 + struct sti_plane *plane, bool status); 44 + int sti_mixer_set_plane_depth(struct sti_mixer *mixer, struct sti_plane *plane); 52 45 int sti_mixer_active_video_area(struct sti_mixer *mixer, 53 - struct drm_display_mode *mode); 46 + struct drm_display_mode *mode); 54 47 55 48 void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable); 56 49 57 50 /* depth in Cross-bar control = z order */ 58 - #define GAM_MIXER_NB_DEPTH_LEVEL 7 51 + #define GAM_MIXER_NB_DEPTH_LEVEL 6 59 52 60 53 #define STI_MIXER_MAIN 0 61 54 #define STI_MIXER_AUX 1
+122
drivers/gpu/drm/sti/sti_plane.c
··· 1 + /* 2 + * Copyright (C) STMicroelectronics SA 2014 3 + * Authors: Benjamin Gaignard <benjamin.gaignard@st.com> 4 + * Fabien Dessenne <fabien.dessenne@st.com> 5 + * for STMicroelectronics. 6 + * License terms: GNU General Public License (GPL), version 2 7 + */ 8 + 9 + #include <drm/drmP.h> 10 + #include <drm/drm_fb_cma_helper.h> 11 + #include <drm/drm_gem_cma_helper.h> 12 + 13 + #include "sti_compositor.h" 14 + #include "sti_drv.h" 15 + #include "sti_plane.h" 16 + 17 + /* (Background) < GDP0 < GDP1 < HQVDP0 < GDP2 < GDP3 < (ForeGround) */ 18 + enum sti_plane_desc sti_plane_default_zorder[] = { 19 + STI_GDP_0, 20 + STI_GDP_1, 21 + STI_HQVDP_0, 22 + STI_GDP_2, 23 + STI_GDP_3, 24 + }; 25 + 26 + const char *sti_plane_to_str(struct sti_plane *plane) 27 + { 28 + switch (plane->desc) { 29 + case STI_GDP_0: 30 + return "GDP0"; 31 + case STI_GDP_1: 32 + return "GDP1"; 33 + case STI_GDP_2: 34 + return "GDP2"; 35 + case STI_GDP_3: 36 + return "GDP3"; 37 + case STI_HQVDP_0: 38 + return "HQVDP0"; 39 + case STI_CURSOR: 40 + return "CURSOR"; 41 + default: 42 + return "<UNKNOWN PLANE>"; 43 + } 44 + } 45 + EXPORT_SYMBOL(sti_plane_to_str); 46 + 47 + static void sti_plane_destroy(struct drm_plane *drm_plane) 48 + { 49 + DRM_DEBUG_DRIVER("\n"); 50 + 51 + drm_plane_helper_disable(drm_plane); 52 + drm_plane_cleanup(drm_plane); 53 + } 54 + 55 + static int sti_plane_set_property(struct drm_plane *drm_plane, 56 + struct drm_property *property, 57 + uint64_t val) 58 + { 59 + struct drm_device *dev = drm_plane->dev; 60 + struct sti_private *private = dev->dev_private; 61 + struct sti_plane *plane = to_sti_plane(drm_plane); 62 + 63 + DRM_DEBUG_DRIVER("\n"); 64 + 65 + if (property == private->plane_zorder_property) { 66 + plane->zorder = val; 67 + return 0; 68 + } 69 + 70 + return -EINVAL; 71 + } 72 + 73 + static void sti_plane_attach_zorder_property(struct drm_plane *drm_plane) 74 + { 75 + struct drm_device *dev = drm_plane->dev; 76 + struct sti_private *private = dev->dev_private; 77 + 
struct sti_plane *plane = to_sti_plane(drm_plane); 78 + struct drm_property *prop; 79 + 80 + prop = private->plane_zorder_property; 81 + if (!prop) { 82 + prop = drm_property_create_range(dev, 0, "zpos", 1, 83 + GAM_MIXER_NB_DEPTH_LEVEL); 84 + if (!prop) 85 + return; 86 + 87 + private->plane_zorder_property = prop; 88 + } 89 + 90 + drm_object_attach_property(&drm_plane->base, prop, plane->zorder); 91 + } 92 + 93 + void sti_plane_init_property(struct sti_plane *plane, 94 + enum drm_plane_type type) 95 + { 96 + unsigned int i; 97 + 98 + for (i = 0; i < ARRAY_SIZE(sti_plane_default_zorder); i++) 99 + if (sti_plane_default_zorder[i] == plane->desc) 100 + break; 101 + 102 + plane->zorder = i + 1; 103 + 104 + if (type == DRM_PLANE_TYPE_OVERLAY) 105 + sti_plane_attach_zorder_property(&plane->drm_plane); 106 + 107 + DRM_DEBUG_DRIVER("drm plane:%d mapped to %s with zorder:%d\n", 108 + plane->drm_plane.base.id, 109 + sti_plane_to_str(plane), plane->zorder); 110 + } 111 + EXPORT_SYMBOL(sti_plane_init_property); 112 + 113 + struct drm_plane_funcs sti_plane_helpers_funcs = { 114 + .update_plane = drm_atomic_helper_update_plane, 115 + .disable_plane = drm_atomic_helper_disable_plane, 116 + .destroy = sti_plane_destroy, 117 + .set_property = sti_plane_set_property, 118 + .reset = drm_atomic_helper_plane_reset, 119 + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, 120 + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, 121 + }; 122 + EXPORT_SYMBOL(sti_plane_helpers_funcs);
+71
drivers/gpu/drm/sti/sti_plane.h
··· 1 + /* 2 + * Copyright (C) STMicroelectronics SA 2014 3 + * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics. 4 + * License terms: GNU General Public License (GPL), version 2 5 + */ 6 + 7 + #ifndef _STI_PLANE_H_ 8 + #define _STI_PLANE_H_ 9 + 10 + #include <drm/drmP.h> 11 + #include <drm/drm_atomic_helper.h> 12 + #include <drm/drm_plane_helper.h> 13 + 14 + extern struct drm_plane_funcs sti_plane_helpers_funcs; 15 + 16 + #define to_sti_plane(x) container_of(x, struct sti_plane, drm_plane) 17 + 18 + #define STI_PLANE_TYPE_SHIFT 8 19 + #define STI_PLANE_TYPE_MASK (~((1 << STI_PLANE_TYPE_SHIFT) - 1)) 20 + 21 + enum sti_plane_type { 22 + STI_GDP = 1 << STI_PLANE_TYPE_SHIFT, 23 + STI_VDP = 2 << STI_PLANE_TYPE_SHIFT, 24 + STI_CUR = 3 << STI_PLANE_TYPE_SHIFT, 25 + STI_BCK = 4 << STI_PLANE_TYPE_SHIFT 26 + }; 27 + 28 + enum sti_plane_id_of_type { 29 + STI_ID_0 = 0, 30 + STI_ID_1 = 1, 31 + STI_ID_2 = 2, 32 + STI_ID_3 = 3 33 + }; 34 + 35 + enum sti_plane_desc { 36 + STI_GDP_0 = STI_GDP | STI_ID_0, 37 + STI_GDP_1 = STI_GDP | STI_ID_1, 38 + STI_GDP_2 = STI_GDP | STI_ID_2, 39 + STI_GDP_3 = STI_GDP | STI_ID_3, 40 + STI_HQVDP_0 = STI_VDP | STI_ID_0, 41 + STI_CURSOR = STI_CUR, 42 + STI_BACK = STI_BCK 43 + }; 44 + 45 + enum sti_plane_status { 46 + STI_PLANE_READY, 47 + STI_PLANE_UPDATED, 48 + STI_PLANE_DISABLING, 49 + STI_PLANE_FLUSHING, 50 + STI_PLANE_DISABLED, 51 + }; 52 + 53 + /** 54 + * STI plane structure 55 + * 56 + * @plane: drm plane it is bound to (if any) 57 + * @desc: plane type & id 58 + * @status: to know the status of the plane 59 + * @zorder: plane z-order 60 + */ 61 + struct sti_plane { 62 + struct drm_plane drm_plane; 63 + enum sti_plane_desc desc; 64 + enum sti_plane_status status; 65 + int zorder; 66 + }; 67 + 68 + const char *sti_plane_to_str(struct sti_plane *plane); 69 + void sti_plane_init_property(struct sti_plane *plane, 70 + enum drm_plane_type type); 71 + #endif
+8 -46
drivers/gpu/drm/sti/sti_tvout.c
··· 16 16 #include <drm/drmP.h> 17 17 #include <drm/drm_crtc_helper.h> 18 18 19 - #include "sti_drm_crtc.h" 19 + #include "sti_crtc.h" 20 20 21 21 /* glue registers */ 22 22 #define TVO_CSC_MAIN_M0 0x000 ··· 473 473 { 474 474 struct sti_tvout *tvout = to_sti_tvout(encoder); 475 475 476 - tvout_dvo_start(tvout, sti_drm_crtc_is_main(encoder->crtc)); 476 + tvout_dvo_start(tvout, sti_crtc_is_main(encoder->crtc)); 477 477 } 478 478 479 479 static void sti_dvo_encoder_disable(struct drm_encoder *encoder) ··· 523 523 { 524 524 struct sti_tvout *tvout = to_sti_tvout(encoder); 525 525 526 - tvout_hda_start(tvout, sti_drm_crtc_is_main(encoder->crtc)); 526 + tvout_hda_start(tvout, sti_crtc_is_main(encoder->crtc)); 527 527 } 528 528 529 529 static void sti_hda_encoder_disable(struct drm_encoder *encoder) ··· 575 575 { 576 576 struct sti_tvout *tvout = to_sti_tvout(encoder); 577 577 578 - tvout_hdmi_start(tvout, sti_drm_crtc_is_main(encoder->crtc)); 578 + tvout_hdmi_start(tvout, sti_crtc_is_main(encoder->crtc)); 579 579 } 580 580 581 581 static void sti_hdmi_encoder_disable(struct drm_encoder *encoder) ··· 644 644 struct sti_tvout *tvout = dev_get_drvdata(dev); 645 645 struct drm_device *drm_dev = data; 646 646 unsigned int i; 647 - int ret; 648 647 649 648 tvout->drm_dev = drm_dev; 650 649 ··· 657 658 658 659 sti_tvout_create_encoders(drm_dev, tvout); 659 660 660 - ret = component_bind_all(dev, drm_dev); 661 - if (ret) 662 - sti_tvout_destroy_encoders(tvout); 663 - 664 - return ret; 661 + return 0; 665 662 } 666 663 667 664 static void sti_tvout_unbind(struct device *dev, struct device *master, 668 665 void *data) 669 666 { 670 - /* do nothing */ 667 + struct sti_tvout *tvout = dev_get_drvdata(dev); 668 + 669 + sti_tvout_destroy_encoders(tvout); 671 670 } 672 671 673 672 static const struct component_ops sti_tvout_ops = { 674 673 .bind = sti_tvout_bind, 675 674 .unbind = sti_tvout_unbind, 676 - }; 677 - 678 - static int compare_of(struct device *dev, void *data) 679 - { 680 - 
return dev->of_node == data; 681 - } 682 - 683 - static int sti_tvout_master_bind(struct device *dev) 684 - { 685 - return 0; 686 - } 687 - 688 - static void sti_tvout_master_unbind(struct device *dev) 689 - { 690 - /* do nothing */ 691 - } 692 - 693 - static const struct component_master_ops sti_tvout_master_ops = { 694 - .bind = sti_tvout_master_bind, 695 - .unbind = sti_tvout_master_unbind, 696 675 }; 697 676 698 677 static int sti_tvout_probe(struct platform_device *pdev) ··· 679 702 struct device_node *node = dev->of_node; 680 703 struct sti_tvout *tvout; 681 704 struct resource *res; 682 - struct device_node *child_np; 683 - struct component_match *match = NULL; 684 705 685 706 DRM_INFO("%s\n", __func__); 686 707 ··· 709 734 710 735 platform_set_drvdata(pdev, tvout); 711 736 712 - of_platform_populate(node, NULL, NULL, dev); 713 - 714 - child_np = of_get_next_available_child(node, NULL); 715 - 716 - while (child_np) { 717 - component_match_add(dev, &match, compare_of, child_np); 718 - of_node_put(child_np); 719 - child_np = of_get_next_available_child(node, child_np); 720 - } 721 - 722 - component_master_add_with_match(dev, &sti_tvout_master_ops, match); 723 - 724 737 return component_add(dev, &sti_tvout_ops); 725 738 } 726 739 727 740 static int sti_tvout_remove(struct platform_device *pdev) 728 741 { 729 - component_master_del(&pdev->dev, &sti_tvout_master_ops); 730 742 component_del(&pdev->dev, &sti_tvout_ops); 731 743 return 0; 732 744 }
+29 -43
drivers/gpu/drm/sti/sti_vid.c
··· 6 6 7 7 #include <drm/drmP.h> 8 8 9 - #include "sti_layer.h" 9 + #include "sti_plane.h" 10 10 #include "sti_vid.h" 11 11 #include "sti_vtg.h" 12 12 ··· 43 43 #define VID_MPR2_BT709 0x07150545 44 44 #define VID_MPR3_BT709 0x00000AE8 45 45 46 - static int sti_vid_prepare_layer(struct sti_layer *vid, bool first_prepare) 46 + void sti_vid_commit(struct sti_vid *vid, 47 + struct drm_plane_state *state) 47 48 { 48 - u32 val; 49 + struct drm_crtc *crtc = state->crtc; 50 + struct drm_display_mode *mode = &crtc->mode; 51 + int dst_x = state->crtc_x; 52 + int dst_y = state->crtc_y; 53 + int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x); 54 + int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y); 55 + u32 val, ydo, xdo, yds, xds; 56 + 57 + /* Input / output size 58 + * Align to upper even value */ 59 + dst_w = ALIGN(dst_w, 2); 60 + dst_h = ALIGN(dst_h, 2); 49 61 50 62 /* Unmask */ 51 63 val = readl(vid->regs + VID_CTL); 52 64 val &= ~VID_CTL_IGNORE; 53 65 writel(val, vid->regs + VID_CTL); 54 66 55 - return 0; 56 - } 57 - 58 - static int sti_vid_commit_layer(struct sti_layer *vid) 59 - { 60 - struct drm_display_mode *mode = vid->mode; 61 - u32 ydo, xdo, yds, xds; 62 - 63 - ydo = sti_vtg_get_line_number(*mode, vid->dst_y); 64 - yds = sti_vtg_get_line_number(*mode, vid->dst_y + vid->dst_h - 1); 65 - xdo = sti_vtg_get_pixel_number(*mode, vid->dst_x); 66 - xds = sti_vtg_get_pixel_number(*mode, vid->dst_x + vid->dst_w - 1); 67 + ydo = sti_vtg_get_line_number(*mode, dst_y); 68 + yds = sti_vtg_get_line_number(*mode, dst_y + dst_h - 1); 69 + xdo = sti_vtg_get_pixel_number(*mode, dst_x); 70 + xds = sti_vtg_get_pixel_number(*mode, dst_x + dst_w - 1); 67 71 68 72 writel((ydo << 16) | xdo, vid->regs + VID_VPO); 69 73 writel((yds << 16) | xds, vid->regs + VID_VPS); 70 - 71 - return 0; 72 74 } 73 75 74 - static int sti_vid_disable_layer(struct sti_layer *vid) 76 + void sti_vid_disable(struct sti_vid *vid) 75 77 { 76 78 u32 val; 77 79 ··· 81 79 val = 
readl(vid->regs + VID_CTL); 82 80 val |= VID_CTL_IGNORE; 83 81 writel(val, vid->regs + VID_CTL); 84 - 85 - return 0; 86 82 } 87 83 88 - static const uint32_t *sti_vid_get_formats(struct sti_layer *layer) 89 - { 90 - return NULL; 91 - } 92 - 93 - static unsigned int sti_vid_get_nb_formats(struct sti_layer *layer) 94 - { 95 - return 0; 96 - } 97 - 98 - static void sti_vid_init(struct sti_layer *vid) 84 + static void sti_vid_init(struct sti_vid *vid) 99 85 { 100 86 /* Enable PSI, Mask layer */ 101 87 writel(VID_CTL_PSI_ENABLE | VID_CTL_IGNORE, vid->regs + VID_CTL); ··· 103 113 writel(VID_CSAT_DFLT, vid->regs + VID_CSAT); 104 114 } 105 115 106 - static const struct sti_layer_funcs vid_ops = { 107 - .get_formats = sti_vid_get_formats, 108 - .get_nb_formats = sti_vid_get_nb_formats, 109 - .init = sti_vid_init, 110 - .prepare = sti_vid_prepare_layer, 111 - .commit = sti_vid_commit_layer, 112 - .disable = sti_vid_disable_layer, 113 - }; 114 - 115 - struct sti_layer *sti_vid_create(struct device *dev) 116 + struct sti_vid *sti_vid_create(struct device *dev, int id, 117 + void __iomem *baseaddr) 116 118 { 117 - struct sti_layer *vid; 119 + struct sti_vid *vid; 118 120 119 121 vid = devm_kzalloc(dev, sizeof(*vid), GFP_KERNEL); 120 122 if (!vid) { ··· 114 132 return NULL; 115 133 } 116 134 117 - vid->ops = &vid_ops; 135 + vid->dev = dev; 136 + vid->regs = baseaddr; 137 + vid->id = id; 138 + 139 + sti_vid_init(vid); 118 140 119 141 return vid; 120 142 }
+18 -1
drivers/gpu/drm/sti/sti_vid.h
··· 7 7 #ifndef _STI_VID_H_ 8 8 #define _STI_VID_H_ 9 9 10 - struct sti_layer *sti_vid_create(struct device *dev); 10 + /** 11 + * STI VID structure 12 + * 13 + * @dev: driver device 14 + * @regs: vid registers 15 + * @id: id of the vid 16 + */ 17 + struct sti_vid { 18 + struct device *dev; 19 + void __iomem *regs; 20 + int id; 21 + }; 22 + 23 + void sti_vid_commit(struct sti_vid *vid, 24 + struct drm_plane_state *state); 25 + void sti_vid_disable(struct sti_vid *vid); 26 + struct sti_vid *sti_vid_create(struct device *dev, int id, 27 + void __iomem *baseaddr); 11 28 12 29 #endif