Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'exynos-drm-next-for-v4.18' of git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos into drm-next

- Add S5PV210 FIMD variant support.

- Add IPP v2 framework.
. it is a rewritten version of the Exynos mem-to-mem image processing
framework which supports color space conversion, image up/down-scaling
and rotation. This new version replaces the existing userspace API with
a new, simple and easy-to-use one, so we have already applied these
APIs to a real user, the Tizen Platform[1], and also made the existing
Scaler, FIMC, GScaler and Rotator drivers use the IPP v2 core API.

And below are the patch lists we have applied for a real user:
https://git.tizen.org/cgit/platform/adaptation/samsung_exynos/libtdm-exynos/log/?h=tizen&qt=grep&q=ipp
https://git.tizen.org/cgit/platform/adaptation/samsung_exynos/libtdm-exynos/commit/?h=tizen&id=b59be207365d10efd489e6f71c8a045b558c44fe
https://git.tizen.org/cgit/platform/kernel/linux-exynos/log/?h=tizen&qt=grep&q=ipp

TDM(Tizen Display Manager) is a Display HAL for Tizen platform.
P.S. The only real user of the IPP API is Tizen.

[1] https://www.tizen.org/

- Two cleanups
. One is to just remove mode_set callback from MIPI-DSI driver
because drm_display_mode data is already available from crtc
atomic state.
. And the other is to just use the new return type, vm_fault_t,
for the page fault handler.

Signed-off-by: Dave Airlie <airlied@redhat.com>

# gpg: Signature made Mon 14 May 2018 14:23:53 AEST
# gpg: using RSA key 573834890C4312B8
# gpg: Can't check signature: public key not found
Link: https://patchwork.freedesktop.org/patch/msgid/1526276453-29879-1-git-send-email-inki.dae@samsung.com

+3535 -2191
+27
Documentation/devicetree/bindings/gpu/samsung-scaler.txt
··· 1 + * Samsung Exynos Image Scaler 2 + 3 + Required properties: 4 + - compatible : value should be one of the following: 5 + (a) "samsung,exynos5420-scaler" for Scaler IP in Exynos5420 6 + (b) "samsung,exynos5433-scaler" for Scaler IP in Exynos5433 7 + 8 + - reg : Physical base address of the IP registers and length of memory 9 + mapped region. 10 + 11 + - interrupts : Interrupt specifier for scaler interrupt, according to format 12 + specific to interrupt parent. 13 + 14 + - clocks : Clock specifier for scaler clock, according to generic clock 15 + bindings. (See Documentation/devicetree/bindings/clock/exynos*.txt) 16 + 17 + - clock-names : Names of clocks. For exynos scaler, it should be "mscl" 18 + on 5420 and "pclk", "aclk" and "aclk_xiu" on 5433. 19 + 20 + Example: 21 + scaler@12800000 { 22 + compatible = "samsung,exynos5420-scaler"; 23 + reg = <0x12800000 0x1294>; 24 + interrupts = <0 220 IRQ_TYPE_LEVEL_HIGH>; 25 + clocks = <&clock CLK_MSCL0>; 26 + clock-names = "mscl"; 27 + };
+3 -1
drivers/gpu/drm/bridge/dumb-vga-dac.c
··· 56 56 } 57 57 58 58 drm_mode_connector_update_edid_property(connector, edid); 59 - return drm_add_edid_modes(connector, edid); 59 + ret = drm_add_edid_modes(connector, edid); 60 + kfree(edid); 61 + return ret; 60 62 61 63 fallback: 62 64 /*
+14 -4
drivers/gpu/drm/exynos/Kconfig
··· 1 1 config DRM_EXYNOS 2 2 tristate "DRM Support for Samsung SoC EXYNOS Series" 3 - depends on OF && DRM && (ARCH_S3C64XX || ARCH_EXYNOS || ARCH_MULTIPLATFORM) 3 + depends on OF && DRM && (ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM) 4 4 select DRM_KMS_HELPER 5 5 select VIDEOMODE_HELPERS 6 6 select SND_SOC_HDMI_CODEC if SND_SOC ··· 95 95 help 96 96 Choose this option if you want to use Exynos G2D for DRM. 97 97 98 + config DRM_EXYNOS_IPP 99 + bool 100 + 98 101 config DRM_EXYNOS_FIMC 99 102 bool "FIMC" 100 - depends on BROKEN && MFD_SYSCON 103 + select DRM_EXYNOS_IPP 101 104 help 102 105 Choose this option if you want to use Exynos FIMC for DRM. 103 106 104 107 config DRM_EXYNOS_ROTATOR 105 108 bool "Rotator" 106 - depends on BROKEN 109 + select DRM_EXYNOS_IPP 107 110 help 108 111 Choose this option if you want to use Exynos Rotator for DRM. 109 112 113 + config DRM_EXYNOS_SCALER 114 + bool "Scaler" 115 + select DRM_EXYNOS_IPP 116 + help 117 + Choose this option if you want to use Exynos Scaler for DRM. 118 + 110 119 config DRM_EXYNOS_GSC 111 120 bool "GScaler" 112 - depends on BROKEN && ARCH_EXYNOS5 && VIDEO_SAMSUNG_EXYNOS_GSC=n 121 + depends on VIDEO_SAMSUNG_EXYNOS_GSC=n 122 + select DRM_EXYNOS_IPP 113 123 help 114 124 Choose this option if you want to use Exynos GSC for DRM. 115 125
+2
drivers/gpu/drm/exynos/Makefile
··· 18 18 exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o 19 19 exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI) += exynos_drm_vidi.o 20 20 exynosdrm-$(CONFIG_DRM_EXYNOS_G2D) += exynos_drm_g2d.o 21 + exynosdrm-$(CONFIG_DRM_EXYNOS_IPP) += exynos_drm_ipp.o 21 22 exynosdrm-$(CONFIG_DRM_EXYNOS_FIMC) += exynos_drm_fimc.o 22 23 exynosdrm-$(CONFIG_DRM_EXYNOS_ROTATOR) += exynos_drm_rotator.o 24 + exynosdrm-$(CONFIG_DRM_EXYNOS_SCALER) += exynos_drm_scaler.o 23 25 exynosdrm-$(CONFIG_DRM_EXYNOS_GSC) += exynos_drm_gsc.o 24 26 exynosdrm-$(CONFIG_DRM_EXYNOS_MIC) += exynos_drm_mic.o 25 27
+32 -3
drivers/gpu/drm/exynos/exynos_drm_drv.c
··· 27 27 #include "exynos_drm_fb.h" 28 28 #include "exynos_drm_gem.h" 29 29 #include "exynos_drm_plane.h" 30 + #include "exynos_drm_ipp.h" 30 31 #include "exynos_drm_vidi.h" 31 32 #include "exynos_drm_g2d.h" 32 33 #include "exynos_drm_iommu.h" 33 34 34 35 #define DRIVER_NAME "exynos" 35 36 #define DRIVER_DESC "Samsung SoC DRM" 36 - #define DRIVER_DATE "20110530" 37 + #define DRIVER_DATE "20180330" 38 + 39 + /* 40 + * Interface history: 41 + * 42 + * 1.0 - Original version 43 + * 1.1 - Upgrade IPP driver to version 2.0 44 + */ 37 45 #define DRIVER_MAJOR 1 38 - #define DRIVER_MINOR 0 46 + #define DRIVER_MINOR 1 39 47 40 48 static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) 41 49 { ··· 95 87 DRM_IOCTL_DEF_DRV(EXYNOS_G2D_SET_CMDLIST, exynos_g2d_set_cmdlist_ioctl, 96 88 DRM_AUTH | DRM_RENDER_ALLOW), 97 89 DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC, exynos_g2d_exec_ioctl, 90 + DRM_AUTH | DRM_RENDER_ALLOW), 91 + DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_RESOURCES, 92 + exynos_drm_ipp_get_res_ioctl, 93 + DRM_AUTH | DRM_RENDER_ALLOW), 94 + DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_CAPS, exynos_drm_ipp_get_caps_ioctl, 95 + DRM_AUTH | DRM_RENDER_ALLOW), 96 + DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_LIMITS, 97 + exynos_drm_ipp_get_limits_ioctl, 98 + DRM_AUTH | DRM_RENDER_ALLOW), 99 + DRM_IOCTL_DEF_DRV(EXYNOS_IPP_COMMIT, exynos_drm_ipp_commit_ioctl, 98 100 DRM_AUTH | DRM_RENDER_ALLOW), 99 101 }; 100 102 ··· 202 184 #define DRM_COMPONENT_DRIVER BIT(0) /* supports component framework */ 203 185 #define DRM_VIRTUAL_DEVICE BIT(1) /* create virtual platform device */ 204 186 #define DRM_DMA_DEVICE BIT(2) /* can be used for dma allocations */ 187 + #define DRM_FIMC_DEVICE BIT(3) /* devices shared with V4L2 subsystem */ 205 188 206 189 #define DRV_PTR(drv, cond) (IS_ENABLED(cond) ? 
&drv : NULL) 207 190 ··· 242 223 DRV_PTR(g2d_driver, CONFIG_DRM_EXYNOS_G2D), 243 224 }, { 244 225 DRV_PTR(fimc_driver, CONFIG_DRM_EXYNOS_FIMC), 226 + DRM_COMPONENT_DRIVER | DRM_FIMC_DEVICE, 245 227 }, { 246 228 DRV_PTR(rotator_driver, CONFIG_DRM_EXYNOS_ROTATOR), 229 + DRM_COMPONENT_DRIVER 230 + }, { 231 + DRV_PTR(scaler_driver, CONFIG_DRM_EXYNOS_SCALER), 232 + DRM_COMPONENT_DRIVER 247 233 }, { 248 234 DRV_PTR(gsc_driver, CONFIG_DRM_EXYNOS_GSC), 235 + DRM_COMPONENT_DRIVER 249 236 }, { 250 237 &exynos_drm_platform_driver, 251 238 DRM_VIRTUAL_DEVICE ··· 279 254 &info->driver->driver, 280 255 (void *)platform_bus_type.match))) { 281 256 put_device(p); 282 - component_match_add(dev, &match, compare_dev, d); 257 + 258 + if (!(info->flags & DRM_FIMC_DEVICE) || 259 + exynos_drm_check_fimc_device(d) == 0) 260 + component_match_add(dev, &match, 261 + compare_dev, d); 283 262 p = d; 284 263 } 285 264 put_device(p);
+10
drivers/gpu/drm/exynos/exynos_drm_drv.h
··· 273 273 } 274 274 #endif 275 275 276 + #ifdef CONFIG_DRM_EXYNOS_FIMC 277 + int exynos_drm_check_fimc_device(struct device *dev); 278 + #else 279 + static inline int exynos_drm_check_fimc_device(struct device *dev) 280 + { 281 + return 0; 282 + } 283 + #endif 284 + 276 285 int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, 277 286 bool nonblock); 278 287 ··· 297 288 extern struct platform_driver g2d_driver; 298 289 extern struct platform_driver fimc_driver; 299 290 extern struct platform_driver rotator_driver; 291 + extern struct platform_driver scaler_driver; 300 292 extern struct platform_driver gsc_driver; 301 293 extern struct platform_driver mic_driver; 302 294 #endif
+10 -30
drivers/gpu/drm/exynos/exynos_drm_dsi.c
··· 270 270 u32 lanes; 271 271 u32 mode_flags; 272 272 u32 format; 273 - struct videomode vm; 274 273 275 274 int state; 276 275 struct drm_property *brightness; ··· 880 881 881 882 static void exynos_dsi_set_display_mode(struct exynos_dsi *dsi) 882 883 { 883 - struct videomode *vm = &dsi->vm; 884 + struct drm_display_mode *m = &dsi->encoder.crtc->state->adjusted_mode; 884 885 unsigned int num_bits_resol = dsi->driver_data->num_bits_resol; 885 886 u32 reg; 886 887 887 888 if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) { 888 889 reg = DSIM_CMD_ALLOW(0xf) 889 - | DSIM_STABLE_VFP(vm->vfront_porch) 890 - | DSIM_MAIN_VBP(vm->vback_porch); 890 + | DSIM_STABLE_VFP(m->vsync_start - m->vdisplay) 891 + | DSIM_MAIN_VBP(m->vtotal - m->vsync_end); 891 892 exynos_dsi_write(dsi, DSIM_MVPORCH_REG, reg); 892 893 893 - reg = DSIM_MAIN_HFP(vm->hfront_porch) 894 - | DSIM_MAIN_HBP(vm->hback_porch); 894 + reg = DSIM_MAIN_HFP(m->hsync_start - m->hdisplay) 895 + | DSIM_MAIN_HBP(m->htotal - m->hsync_end); 895 896 exynos_dsi_write(dsi, DSIM_MHPORCH_REG, reg); 896 897 897 - reg = DSIM_MAIN_VSA(vm->vsync_len) 898 - | DSIM_MAIN_HSA(vm->hsync_len); 898 + reg = DSIM_MAIN_VSA(m->vsync_end - m->vsync_start) 899 + | DSIM_MAIN_HSA(m->hsync_end - m->hsync_start); 899 900 exynos_dsi_write(dsi, DSIM_MSYNC_REG, reg); 900 901 } 901 - reg = DSIM_MAIN_HRESOL(vm->hactive, num_bits_resol) | 902 - DSIM_MAIN_VRESOL(vm->vactive, num_bits_resol); 902 + reg = DSIM_MAIN_HRESOL(m->hdisplay, num_bits_resol) | 903 + DSIM_MAIN_VRESOL(m->vdisplay, num_bits_resol); 903 904 904 905 exynos_dsi_write(dsi, DSIM_MDRESOL_REG, reg); 905 906 906 - dev_dbg(dsi->dev, "LCD size = %dx%d\n", vm->hactive, vm->vactive); 907 + dev_dbg(dsi->dev, "LCD size = %dx%d\n", m->hdisplay, m->vdisplay); 907 908 } 908 909 909 910 static void exynos_dsi_set_display_enable(struct exynos_dsi *dsi, bool enable) ··· 1484 1485 return 0; 1485 1486 } 1486 1487 1487 - static void exynos_dsi_mode_set(struct drm_encoder *encoder, 1488 - struct drm_display_mode 
*mode, 1489 - struct drm_display_mode *adjusted_mode) 1490 - { 1491 - struct exynos_dsi *dsi = encoder_to_dsi(encoder); 1492 - struct videomode *vm = &dsi->vm; 1493 - struct drm_display_mode *m = adjusted_mode; 1494 - 1495 - vm->hactive = m->hdisplay; 1496 - vm->vactive = m->vdisplay; 1497 - vm->vfront_porch = m->vsync_start - m->vdisplay; 1498 - vm->vback_porch = m->vtotal - m->vsync_end; 1499 - vm->vsync_len = m->vsync_end - m->vsync_start; 1500 - vm->hfront_porch = m->hsync_start - m->hdisplay; 1501 - vm->hback_porch = m->htotal - m->hsync_end; 1502 - vm->hsync_len = m->hsync_end - m->hsync_start; 1503 - } 1504 - 1505 1488 static const struct drm_encoder_helper_funcs exynos_dsi_encoder_helper_funcs = { 1506 - .mode_set = exynos_dsi_mode_set, 1507 1489 .enable = exynos_dsi_enable, 1508 1490 .disable = exynos_dsi_disable, 1509 1491 };
+353 -735
drivers/gpu/drm/exynos/exynos_drm_fimc.c
··· 12 12 * 13 13 */ 14 14 #include <linux/kernel.h> 15 + #include <linux/component.h> 15 16 #include <linux/platform_device.h> 16 17 #include <linux/mfd/syscon.h> 17 18 #include <linux/regmap.h> ··· 25 24 #include <drm/exynos_drm.h> 26 25 #include "regs-fimc.h" 27 26 #include "exynos_drm_drv.h" 27 + #include "exynos_drm_iommu.h" 28 28 #include "exynos_drm_ipp.h" 29 - #include "exynos_drm_fimc.h" 30 29 31 30 /* 32 31 * FIMC stands for Fully Interactive Mobile Camera and ··· 34 33 * input DMA reads image data from the memory. 35 34 * output DMA writes image data to memory. 36 35 * FIMC supports image rotation and image effect functions. 37 - * 38 - * M2M operation : supports crop/scale/rotation/csc so on. 39 - * Memory ----> FIMC H/W ----> Memory. 40 - * Writeback operation : supports cloned screen with FIMD. 41 - * FIMD ----> FIMC H/W ----> Memory. 42 - * Output operation : supports direct display using local path. 43 - * Memory ----> FIMC H/W ----> FIMD. 44 - */ 45 - 46 - /* 47 - * TODO 48 - * 1. check suspend/resume api if needed. 49 - * 2. need to check use case platform_device_id. 50 - * 3. check src/dst size with, height. 51 - * 4. added check_prepare api for right register. 52 - * 5. need to add supported list in prop_list. 53 - * 6. check prescaler/scaler optimization. 
54 36 */ 55 37 56 38 #define FIMC_MAX_DEVS 4 ··· 43 59 #define FIMC_BUF_STOP 1 44 60 #define FIMC_BUF_START 2 45 61 #define FIMC_WIDTH_ITU_709 1280 46 - #define FIMC_REFRESH_MAX 60 47 - #define FIMC_REFRESH_MIN 12 48 - #define FIMC_CROP_MAX 8192 49 - #define FIMC_CROP_MIN 32 50 - #define FIMC_SCALE_MAX 4224 51 - #define FIMC_SCALE_MIN 32 62 + #define FIMC_AUTOSUSPEND_DELAY 2000 63 + 64 + static unsigned int fimc_mask = 0xc; 65 + module_param_named(fimc_devs, fimc_mask, uint, 0644); 66 + MODULE_PARM_DESC(fimc_devs, "Alias mask for assigning FIMC devices to Exynos DRM"); 52 67 53 68 #define get_fimc_context(dev) platform_get_drvdata(to_platform_device(dev)) 54 - #define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\ 55 - struct fimc_context, ippdrv); 56 - enum fimc_wb { 57 - FIMC_WB_NONE, 58 - FIMC_WB_A, 59 - FIMC_WB_B, 60 - }; 61 69 62 70 enum { 63 71 FIMC_CLK_LCLK, 64 72 FIMC_CLK_GATE, 65 73 FIMC_CLK_WB_A, 66 74 FIMC_CLK_WB_B, 67 - FIMC_CLK_MUX, 68 - FIMC_CLK_PARENT, 69 75 FIMC_CLKS_MAX 70 76 }; 71 77 ··· 64 90 [FIMC_CLK_GATE] = "fimc", 65 91 [FIMC_CLK_WB_A] = "pxl_async0", 66 92 [FIMC_CLK_WB_B] = "pxl_async1", 67 - [FIMC_CLK_MUX] = "mux", 68 - [FIMC_CLK_PARENT] = "parent", 69 93 }; 70 - 71 - #define FIMC_DEFAULT_LCLK_FREQUENCY 133000000UL 72 94 73 95 /* 74 96 * A structure of scaler. ··· 77 107 * @vratio: vertical ratio. 78 108 */ 79 109 struct fimc_scaler { 80 - bool range; 110 + bool range; 81 111 bool bypass; 82 112 bool up_h; 83 113 bool up_v; ··· 86 116 }; 87 117 88 118 /* 89 - * A structure of scaler capability. 90 - * 91 - * find user manual table 43-1. 92 - * @in_hori: scaler input horizontal size. 93 - * @bypass: scaler bypass mode. 94 - * @dst_h_wo_rot: target horizontal size without output rotation. 95 - * @dst_h_rot: target horizontal size with output rotation. 96 - * @rl_w_wo_rot: real width without input rotation. 97 - * @rl_h_rot: real height without output rotation. 
98 - */ 99 - struct fimc_capability { 100 - /* scaler */ 101 - u32 in_hori; 102 - u32 bypass; 103 - /* output rotator */ 104 - u32 dst_h_wo_rot; 105 - u32 dst_h_rot; 106 - /* input rotator */ 107 - u32 rl_w_wo_rot; 108 - u32 rl_h_rot; 109 - }; 110 - 111 - /* 112 119 * A structure of fimc context. 113 120 * 114 - * @ippdrv: prepare initialization using ippdrv. 115 121 * @regs_res: register resources. 116 122 * @regs: memory mapped io registers. 117 123 * @lock: locking of operations. 118 124 * @clocks: fimc clocks. 119 - * @clk_frequency: LCLK clock frequency. 120 - * @sysreg: handle to SYSREG block regmap. 121 125 * @sc: scaler infomations. 122 126 * @pol: porarity of writeback. 123 127 * @id: fimc id. 124 128 * @irq: irq number. 125 - * @suspended: qos operations. 126 129 */ 127 130 struct fimc_context { 128 - struct exynos_drm_ippdrv ippdrv; 131 + struct exynos_drm_ipp ipp; 132 + struct drm_device *drm_dev; 133 + struct device *dev; 134 + struct exynos_drm_ipp_task *task; 135 + struct exynos_drm_ipp_formats *formats; 136 + unsigned int num_formats; 137 + 129 138 struct resource *regs_res; 130 139 void __iomem *regs; 131 140 spinlock_t lock; 132 141 struct clk *clocks[FIMC_CLKS_MAX]; 133 - u32 clk_frequency; 134 - struct regmap *sysreg; 135 142 struct fimc_scaler sc; 136 143 int id; 137 144 int irq; 138 - bool suspended; 139 145 }; 140 146 141 147 static u32 fimc_read(struct fimc_context *ctx, u32 reg) ··· 163 217 fimc_write(ctx, 0x0, EXYNOS_CIFCNTSEQ); 164 218 } 165 219 166 - static int fimc_set_camblk_fimd0_wb(struct fimc_context *ctx) 167 - { 168 - return regmap_update_bits(ctx->sysreg, SYSREG_CAMERA_BLK, 169 - SYSREG_FIMD0WB_DEST_MASK, 170 - ctx->id << SYSREG_FIMD0WB_DEST_SHIFT); 171 - } 172 - 173 - static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb) 220 + static void fimc_set_type_ctrl(struct fimc_context *ctx) 174 221 { 175 222 u32 cfg; 176 - 177 - DRM_DEBUG_KMS("wb[%d]\n", wb); 178 223 179 224 cfg = fimc_read(ctx, EXYNOS_CIGCTRL); 180 
225 cfg &= ~(EXYNOS_CIGCTRL_TESTPATTERN_MASK | ··· 175 238 EXYNOS_CIGCTRL_SELWB_CAMIF_MASK | 176 239 EXYNOS_CIGCTRL_SELWRITEBACK_MASK); 177 240 178 - switch (wb) { 179 - case FIMC_WB_A: 180 - cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_A | 181 - EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK); 182 - break; 183 - case FIMC_WB_B: 184 - cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_B | 185 - EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK); 186 - break; 187 - case FIMC_WB_NONE: 188 - default: 189 - cfg |= (EXYNOS_CIGCTRL_SELCAM_ITU_A | 190 - EXYNOS_CIGCTRL_SELWRITEBACK_A | 191 - EXYNOS_CIGCTRL_SELCAM_MIPI_A | 192 - EXYNOS_CIGCTRL_SELCAM_FIMC_ITU); 193 - break; 194 - } 241 + cfg |= (EXYNOS_CIGCTRL_SELCAM_ITU_A | 242 + EXYNOS_CIGCTRL_SELWRITEBACK_A | 243 + EXYNOS_CIGCTRL_SELCAM_MIPI_A | 244 + EXYNOS_CIGCTRL_SELCAM_FIMC_ITU); 195 245 196 246 fimc_write(ctx, cfg, EXYNOS_CIGCTRL); 197 247 } ··· 220 296 221 297 static bool fimc_check_ovf(struct fimc_context *ctx) 222 298 { 223 - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 224 299 u32 status, flag; 225 300 226 301 status = fimc_read(ctx, EXYNOS_CISTATUS); ··· 233 310 EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB | 234 311 EXYNOS_CIWDOFST_CLROVFICR); 235 312 236 - dev_err(ippdrv->dev, "occurred overflow at %d, status 0x%x.\n", 313 + dev_err(ctx->dev, "occurred overflow at %d, status 0x%x.\n", 237 314 ctx->id, status); 238 315 return true; 239 316 } ··· 299 376 fimc_write(ctx, cfg, EXYNOS_CIOCTRL); 300 377 } 301 378 302 - 303 - static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt) 379 + static void fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt) 304 380 { 305 - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 306 381 u32 cfg; 307 382 308 383 DRM_DEBUG_KMS("fmt[0x%x]\n", fmt); ··· 313 392 case DRM_FORMAT_RGB565: 314 393 cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB565; 315 394 fimc_write(ctx, cfg, EXYNOS_CISCCTRL); 316 - return 0; 395 + return; 317 396 case DRM_FORMAT_RGB888: 318 397 case DRM_FORMAT_XRGB8888: 319 398 cfg |= 
EXYNOS_CISCCTRL_INRGB_FMT_RGB888; 320 399 fimc_write(ctx, cfg, EXYNOS_CISCCTRL); 321 - return 0; 400 + return; 322 401 default: 323 402 /* bypass */ 324 403 break; ··· 359 438 cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CBCR | 360 439 EXYNOS_MSCTRL_C_INT_IN_2PLANE); 361 440 break; 362 - default: 363 - dev_err(ippdrv->dev, "invalid source yuv order 0x%x.\n", fmt); 364 - return -EINVAL; 365 441 } 366 442 367 443 fimc_write(ctx, cfg, EXYNOS_MSCTRL); 368 - 369 - return 0; 370 444 } 371 445 372 - static int fimc_src_set_fmt(struct device *dev, u32 fmt) 446 + static void fimc_src_set_fmt(struct fimc_context *ctx, u32 fmt, bool tiled) 373 447 { 374 - struct fimc_context *ctx = get_fimc_context(dev); 375 - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 376 448 u32 cfg; 377 449 378 450 DRM_DEBUG_KMS("fmt[0x%x]\n", fmt); ··· 399 485 case DRM_FORMAT_NV21: 400 486 cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420; 401 487 break; 402 - default: 403 - dev_err(ippdrv->dev, "invalid source format 0x%x.\n", fmt); 404 - return -EINVAL; 405 488 } 406 489 407 490 fimc_write(ctx, cfg, EXYNOS_MSCTRL); ··· 406 495 cfg = fimc_read(ctx, EXYNOS_CIDMAPARAM); 407 496 cfg &= ~EXYNOS_CIDMAPARAM_R_MODE_MASK; 408 497 409 - cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR; 498 + if (tiled) 499 + cfg |= EXYNOS_CIDMAPARAM_R_MODE_64X32; 500 + else 501 + cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR; 410 502 411 503 fimc_write(ctx, cfg, EXYNOS_CIDMAPARAM); 412 504 413 - return fimc_src_set_fmt_order(ctx, fmt); 505 + fimc_src_set_fmt_order(ctx, fmt); 414 506 } 415 507 416 - static int fimc_src_set_transf(struct device *dev, 417 - enum drm_exynos_degree degree, 418 - enum drm_exynos_flip flip, bool *swap) 508 + static void fimc_src_set_transf(struct fimc_context *ctx, unsigned int rotation) 419 509 { 420 - struct fimc_context *ctx = get_fimc_context(dev); 421 - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 510 + unsigned int degree = rotation & DRM_MODE_ROTATE_MASK; 422 511 u32 cfg1, cfg2; 423 512 424 - 
DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip); 513 + DRM_DEBUG_KMS("rotation[%x]\n", rotation); 425 514 426 515 cfg1 = fimc_read(ctx, EXYNOS_MSCTRL); 427 516 cfg1 &= ~(EXYNOS_MSCTRL_FLIP_X_MIRROR | ··· 431 520 cfg2 &= ~EXYNOS_CITRGFMT_INROT90_CLOCKWISE; 432 521 433 522 switch (degree) { 434 - case EXYNOS_DRM_DEGREE_0: 435 - if (flip & EXYNOS_DRM_FLIP_VERTICAL) 523 + case DRM_MODE_ROTATE_0: 524 + if (rotation & DRM_MODE_REFLECT_X) 436 525 cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR; 437 - if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) 526 + if (rotation & DRM_MODE_REFLECT_Y) 438 527 cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR; 439 528 break; 440 - case EXYNOS_DRM_DEGREE_90: 529 + case DRM_MODE_ROTATE_90: 441 530 cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE; 442 - if (flip & EXYNOS_DRM_FLIP_VERTICAL) 531 + if (rotation & DRM_MODE_REFLECT_X) 443 532 cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR; 444 - if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) 533 + if (rotation & DRM_MODE_REFLECT_Y) 445 534 cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR; 446 535 break; 447 - case EXYNOS_DRM_DEGREE_180: 536 + case DRM_MODE_ROTATE_180: 448 537 cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR | 449 538 EXYNOS_MSCTRL_FLIP_Y_MIRROR); 450 - if (flip & EXYNOS_DRM_FLIP_VERTICAL) 539 + if (rotation & DRM_MODE_REFLECT_X) 451 540 cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR; 452 - if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) 541 + if (rotation & DRM_MODE_REFLECT_Y) 453 542 cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR; 454 543 break; 455 - case EXYNOS_DRM_DEGREE_270: 544 + case DRM_MODE_ROTATE_270: 456 545 cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR | 457 546 EXYNOS_MSCTRL_FLIP_Y_MIRROR); 458 547 cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE; 459 - if (flip & EXYNOS_DRM_FLIP_VERTICAL) 548 + if (rotation & DRM_MODE_REFLECT_X) 460 549 cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR; 461 - if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) 550 + if (rotation & DRM_MODE_REFLECT_Y) 462 551 cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR; 463 552 break; 464 - default: 465 - dev_err(ippdrv->dev, "invalid degree value 
%d.\n", degree); 466 - return -EINVAL; 467 553 } 468 554 469 555 fimc_write(ctx, cfg1, EXYNOS_MSCTRL); 470 556 fimc_write(ctx, cfg2, EXYNOS_CITRGFMT); 471 - *swap = (cfg2 & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) ? 1 : 0; 472 - 473 - return 0; 474 557 } 475 558 476 - static int fimc_set_window(struct fimc_context *ctx, 477 - struct drm_exynos_pos *pos, struct drm_exynos_sz *sz) 559 + static void fimc_set_window(struct fimc_context *ctx, 560 + struct exynos_drm_ipp_buffer *buf) 478 561 { 479 562 u32 cfg, h1, h2, v1, v2; 480 563 481 564 /* cropped image */ 482 - h1 = pos->x; 483 - h2 = sz->hsize - pos->w - pos->x; 484 - v1 = pos->y; 485 - v2 = sz->vsize - pos->h - pos->y; 565 + h1 = buf->rect.x; 566 + h2 = buf->buf.width - buf->rect.w - buf->rect.x; 567 + v1 = buf->rect.y; 568 + v2 = buf->buf.height - buf->rect.h - buf->rect.y; 486 569 487 570 DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n", 488 - pos->x, pos->y, pos->w, pos->h, sz->hsize, sz->vsize); 571 + buf->rect.x, buf->rect.y, buf->rect.w, buf->rect.h, 572 + buf->buf.width, buf->buf.height); 489 573 DRM_DEBUG_KMS("h1[%d]h2[%d]v1[%d]v2[%d]\n", h1, h2, v1, v2); 490 574 491 575 /* ··· 498 592 cfg = (EXYNOS_CIWDOFST2_WINHOROFST2(h2) | 499 593 EXYNOS_CIWDOFST2_WINVEROFST2(v2)); 500 594 fimc_write(ctx, cfg, EXYNOS_CIWDOFST2); 501 - 502 - return 0; 503 595 } 504 596 505 - static int fimc_src_set_size(struct device *dev, int swap, 506 - struct drm_exynos_pos *pos, struct drm_exynos_sz *sz) 597 + static void fimc_src_set_size(struct fimc_context *ctx, 598 + struct exynos_drm_ipp_buffer *buf) 507 599 { 508 - struct fimc_context *ctx = get_fimc_context(dev); 509 - struct drm_exynos_pos img_pos = *pos; 510 - struct drm_exynos_sz img_sz = *sz; 511 600 u32 cfg; 512 601 513 - DRM_DEBUG_KMS("swap[%d]hsize[%d]vsize[%d]\n", 514 - swap, sz->hsize, sz->vsize); 602 + DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", buf->buf.width, buf->buf.height); 515 603 516 604 /* original size */ 517 - cfg = (EXYNOS_ORGISIZE_HORIZONTAL(img_sz.hsize) | 
518 - EXYNOS_ORGISIZE_VERTICAL(img_sz.vsize)); 605 + cfg = (EXYNOS_ORGISIZE_HORIZONTAL(buf->buf.width) | 606 + EXYNOS_ORGISIZE_VERTICAL(buf->buf.height)); 519 607 520 608 fimc_write(ctx, cfg, EXYNOS_ORGISIZE); 521 609 522 - DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]\n", pos->x, pos->y, pos->w, pos->h); 523 - 524 - if (swap) { 525 - img_pos.w = pos->h; 526 - img_pos.h = pos->w; 527 - img_sz.hsize = sz->vsize; 528 - img_sz.vsize = sz->hsize; 529 - } 610 + DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]\n", buf->rect.x, buf->rect.y, 611 + buf->rect.w, buf->rect.h); 530 612 531 613 /* set input DMA image size */ 532 614 cfg = fimc_read(ctx, EXYNOS_CIREAL_ISIZE); 533 615 cfg &= ~(EXYNOS_CIREAL_ISIZE_HEIGHT_MASK | 534 616 EXYNOS_CIREAL_ISIZE_WIDTH_MASK); 535 - cfg |= (EXYNOS_CIREAL_ISIZE_WIDTH(img_pos.w) | 536 - EXYNOS_CIREAL_ISIZE_HEIGHT(img_pos.h)); 617 + cfg |= (EXYNOS_CIREAL_ISIZE_WIDTH(buf->rect.w) | 618 + EXYNOS_CIREAL_ISIZE_HEIGHT(buf->rect.h)); 537 619 fimc_write(ctx, cfg, EXYNOS_CIREAL_ISIZE); 538 620 539 621 /* ··· 529 635 * for now, we support only ITU601 8 bit mode 530 636 */ 531 637 cfg = (EXYNOS_CISRCFMT_ITU601_8BIT | 532 - EXYNOS_CISRCFMT_SOURCEHSIZE(img_sz.hsize) | 533 - EXYNOS_CISRCFMT_SOURCEVSIZE(img_sz.vsize)); 638 + EXYNOS_CISRCFMT_SOURCEHSIZE(buf->buf.width) | 639 + EXYNOS_CISRCFMT_SOURCEVSIZE(buf->buf.height)); 534 640 fimc_write(ctx, cfg, EXYNOS_CISRCFMT); 535 641 536 642 /* offset Y(RGB), Cb, Cr */ 537 - cfg = (EXYNOS_CIIYOFF_HORIZONTAL(img_pos.x) | 538 - EXYNOS_CIIYOFF_VERTICAL(img_pos.y)); 643 + cfg = (EXYNOS_CIIYOFF_HORIZONTAL(buf->rect.x) | 644 + EXYNOS_CIIYOFF_VERTICAL(buf->rect.y)); 539 645 fimc_write(ctx, cfg, EXYNOS_CIIYOFF); 540 - cfg = (EXYNOS_CIICBOFF_HORIZONTAL(img_pos.x) | 541 - EXYNOS_CIICBOFF_VERTICAL(img_pos.y)); 646 + cfg = (EXYNOS_CIICBOFF_HORIZONTAL(buf->rect.x) | 647 + EXYNOS_CIICBOFF_VERTICAL(buf->rect.y)); 542 648 fimc_write(ctx, cfg, EXYNOS_CIICBOFF); 543 - cfg = (EXYNOS_CIICROFF_HORIZONTAL(img_pos.x) | 544 - 
EXYNOS_CIICROFF_VERTICAL(img_pos.y)); 649 + cfg = (EXYNOS_CIICROFF_HORIZONTAL(buf->rect.x) | 650 + EXYNOS_CIICROFF_VERTICAL(buf->rect.y)); 545 651 fimc_write(ctx, cfg, EXYNOS_CIICROFF); 546 652 547 - return fimc_set_window(ctx, &img_pos, &img_sz); 653 + fimc_set_window(ctx, buf); 548 654 } 549 655 550 - static int fimc_src_set_addr(struct device *dev, 551 - struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id, 552 - enum drm_exynos_ipp_buf_type buf_type) 656 + static void fimc_src_set_addr(struct fimc_context *ctx, 657 + struct exynos_drm_ipp_buffer *buf) 553 658 { 554 - struct fimc_context *ctx = get_fimc_context(dev); 555 - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 556 - struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node; 557 - struct drm_exynos_ipp_property *property; 558 - struct drm_exynos_ipp_config *config; 559 - 560 - if (!c_node) { 561 - DRM_ERROR("failed to get c_node.\n"); 562 - return -EINVAL; 563 - } 564 - 565 - property = &c_node->property; 566 - 567 - DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]buf_type[%d]\n", 568 - property->prop_id, buf_id, buf_type); 569 - 570 - if (buf_id > FIMC_MAX_SRC) { 571 - dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id); 572 - return -ENOMEM; 573 - } 574 - 575 - /* address register set */ 576 - switch (buf_type) { 577 - case IPP_BUF_ENQUEUE: 578 - config = &property->config[EXYNOS_DRM_OPS_SRC]; 579 - fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_Y], 580 - EXYNOS_CIIYSA0); 581 - 582 - if (config->fmt == DRM_FORMAT_YVU420) { 583 - fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR], 584 - EXYNOS_CIICBSA0); 585 - fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB], 586 - EXYNOS_CIICRSA0); 587 - } else { 588 - fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB], 589 - EXYNOS_CIICBSA0); 590 - fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR], 591 - EXYNOS_CIICRSA0); 592 - } 593 - break; 594 - case IPP_BUF_DEQUEUE: 595 - fimc_write(ctx, 0x0, EXYNOS_CIIYSA0); 596 - fimc_write(ctx, 0x0, EXYNOS_CIICBSA0); 597 - 
fimc_write(ctx, 0x0, EXYNOS_CIICRSA0); 598 - break; 599 - default: 600 - /* bypass */ 601 - break; 602 - } 603 - 604 - return 0; 659 + fimc_write(ctx, buf->dma_addr[0], EXYNOS_CIIYSA(0)); 660 + fimc_write(ctx, buf->dma_addr[1], EXYNOS_CIICBSA(0)); 661 + fimc_write(ctx, buf->dma_addr[2], EXYNOS_CIICRSA(0)); 605 662 } 606 663 607 - static struct exynos_drm_ipp_ops fimc_src_ops = { 608 - .set_fmt = fimc_src_set_fmt, 609 - .set_transf = fimc_src_set_transf, 610 - .set_size = fimc_src_set_size, 611 - .set_addr = fimc_src_set_addr, 612 - }; 613 - 614 - static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt) 664 + static void fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt) 615 665 { 616 - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 617 666 u32 cfg; 618 667 619 668 DRM_DEBUG_KMS("fmt[0x%x]\n", fmt); ··· 569 732 case DRM_FORMAT_RGB565: 570 733 cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565; 571 734 fimc_write(ctx, cfg, EXYNOS_CISCCTRL); 572 - return 0; 735 + return; 573 736 case DRM_FORMAT_RGB888: 574 737 cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888; 575 738 fimc_write(ctx, cfg, EXYNOS_CISCCTRL); 576 - return 0; 739 + return; 577 740 case DRM_FORMAT_XRGB8888: 578 741 cfg |= (EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 | 579 742 EXYNOS_CISCCTRL_EXTRGB_EXTENSION); ··· 621 784 cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR; 622 785 cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE; 623 786 break; 624 - default: 625 - dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt); 626 - return -EINVAL; 627 787 } 628 788 629 789 fimc_write(ctx, cfg, EXYNOS_CIOCTRL); 630 - 631 - return 0; 632 790 } 633 791 634 - static int fimc_dst_set_fmt(struct device *dev, u32 fmt) 792 + static void fimc_dst_set_fmt(struct fimc_context *ctx, u32 fmt, bool tiled) 635 793 { 636 - struct fimc_context *ctx = get_fimc_context(dev); 637 - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 638 794 u32 cfg; 639 795 640 796 DRM_DEBUG_KMS("fmt[0x%x]\n", fmt); ··· 667 837 case DRM_FORMAT_NV21: 668 838 cfg |= 
EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420; 669 839 break; 670 - default: 671 - dev_err(ippdrv->dev, "invalid target format 0x%x.\n", 672 - fmt); 673 - return -EINVAL; 674 840 } 675 841 676 842 fimc_write(ctx, cfg, EXYNOS_CITRGFMT); ··· 675 849 cfg = fimc_read(ctx, EXYNOS_CIDMAPARAM); 676 850 cfg &= ~EXYNOS_CIDMAPARAM_W_MODE_MASK; 677 851 678 - cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR; 852 + if (tiled) 853 + cfg |= EXYNOS_CIDMAPARAM_W_MODE_64X32; 854 + else 855 + cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR; 679 856 680 857 fimc_write(ctx, cfg, EXYNOS_CIDMAPARAM); 681 858 682 - return fimc_dst_set_fmt_order(ctx, fmt); 859 + fimc_dst_set_fmt_order(ctx, fmt); 683 860 } 684 861 685 - static int fimc_dst_set_transf(struct device *dev, 686 - enum drm_exynos_degree degree, 687 - enum drm_exynos_flip flip, bool *swap) 862 + static void fimc_dst_set_transf(struct fimc_context *ctx, unsigned int rotation) 688 863 { 689 - struct fimc_context *ctx = get_fimc_context(dev); 690 - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 864 + unsigned int degree = rotation & DRM_MODE_ROTATE_MASK; 691 865 u32 cfg; 692 866 693 - DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip); 867 + DRM_DEBUG_KMS("rotation[0x%x]\n", rotation); 694 868 695 869 cfg = fimc_read(ctx, EXYNOS_CITRGFMT); 696 870 cfg &= ~EXYNOS_CITRGFMT_FLIP_MASK; 697 871 cfg &= ~EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE; 698 872 699 873 switch (degree) { 700 - case EXYNOS_DRM_DEGREE_0: 701 - if (flip & EXYNOS_DRM_FLIP_VERTICAL) 874 + case DRM_MODE_ROTATE_0: 875 + if (rotation & DRM_MODE_REFLECT_X) 702 876 cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR; 703 - if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) 877 + if (rotation & DRM_MODE_REFLECT_Y) 704 878 cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR; 705 879 break; 706 - case EXYNOS_DRM_DEGREE_90: 880 + case DRM_MODE_ROTATE_90: 707 881 cfg |= EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE; 708 - if (flip & EXYNOS_DRM_FLIP_VERTICAL) 882 + if (rotation & DRM_MODE_REFLECT_X) 709 883 cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR; 710 - if (flip & 
EXYNOS_DRM_FLIP_HORIZONTAL) 884 + if (rotation & DRM_MODE_REFLECT_Y) 711 885 cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR; 712 886 break; 713 - case EXYNOS_DRM_DEGREE_180: 887 + case DRM_MODE_ROTATE_180: 714 888 cfg |= (EXYNOS_CITRGFMT_FLIP_X_MIRROR | 715 889 EXYNOS_CITRGFMT_FLIP_Y_MIRROR); 716 - if (flip & EXYNOS_DRM_FLIP_VERTICAL) 890 + if (rotation & DRM_MODE_REFLECT_X) 717 891 cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR; 718 - if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) 892 + if (rotation & DRM_MODE_REFLECT_Y) 719 893 cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR; 720 894 break; 721 - case EXYNOS_DRM_DEGREE_270: 895 + case DRM_MODE_ROTATE_270: 722 896 cfg |= (EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE | 723 897 EXYNOS_CITRGFMT_FLIP_X_MIRROR | 724 898 EXYNOS_CITRGFMT_FLIP_Y_MIRROR); 725 - if (flip & EXYNOS_DRM_FLIP_VERTICAL) 899 + if (rotation & DRM_MODE_REFLECT_X) 726 900 cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR; 727 - if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) 901 + if (rotation & DRM_MODE_REFLECT_Y) 728 902 cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR; 729 903 break; 730 - default: 731 - dev_err(ippdrv->dev, "invalid degree value %d.\n", degree); 732 - return -EINVAL; 733 904 } 734 905 735 906 fimc_write(ctx, cfg, EXYNOS_CITRGFMT); 736 - *swap = (cfg & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) ? 
1 : 0; 737 - 738 - return 0; 739 907 } 740 908 741 909 static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc, 742 - struct drm_exynos_pos *src, struct drm_exynos_pos *dst) 910 + struct drm_exynos_ipp_task_rect *src, 911 + struct drm_exynos_ipp_task_rect *dst) 743 912 { 744 - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 745 913 u32 cfg, cfg_ext, shfactor; 746 914 u32 pre_dst_width, pre_dst_height; 747 915 u32 hfactor, vfactor; ··· 762 942 /* fimc_ippdrv_check_property assures that dividers are not null */ 763 943 hfactor = fls(src_w / dst_w / 2); 764 944 if (hfactor > FIMC_SHFACTOR / 2) { 765 - dev_err(ippdrv->dev, "failed to get ratio horizontal.\n"); 945 + dev_err(ctx->dev, "failed to get ratio horizontal.\n"); 766 946 return -EINVAL; 767 947 } 768 948 769 949 vfactor = fls(src_h / dst_h / 2); 770 950 if (vfactor > FIMC_SHFACTOR / 2) { 771 - dev_err(ippdrv->dev, "failed to get ratio vertical.\n"); 951 + dev_err(ctx->dev, "failed to get ratio vertical.\n"); 772 952 return -EINVAL; 773 953 } 774 954 ··· 839 1019 fimc_write(ctx, cfg_ext, EXYNOS_CIEXTEN); 840 1020 } 841 1021 842 - static int fimc_dst_set_size(struct device *dev, int swap, 843 - struct drm_exynos_pos *pos, struct drm_exynos_sz *sz) 1022 + static void fimc_dst_set_size(struct fimc_context *ctx, 1023 + struct exynos_drm_ipp_buffer *buf) 844 1024 { 845 - struct fimc_context *ctx = get_fimc_context(dev); 846 - struct drm_exynos_pos img_pos = *pos; 847 - struct drm_exynos_sz img_sz = *sz; 848 - u32 cfg; 1025 + u32 cfg, cfg_ext; 849 1026 850 - DRM_DEBUG_KMS("swap[%d]hsize[%d]vsize[%d]\n", 851 - swap, sz->hsize, sz->vsize); 1027 + DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", buf->buf.width, buf->buf.height); 852 1028 853 1029 /* original size */ 854 - cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(img_sz.hsize) | 855 - EXYNOS_ORGOSIZE_VERTICAL(img_sz.vsize)); 1030 + cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(buf->buf.width) | 1031 + EXYNOS_ORGOSIZE_VERTICAL(buf->buf.height)); 856 1032 857 1033 fimc_write(ctx, 
cfg, EXYNOS_ORGOSIZE); 858 1034 859 - DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]\n", pos->x, pos->y, pos->w, pos->h); 1035 + DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]\n", buf->rect.x, buf->rect.y, 1036 + buf->rect.w, buf->rect.h); 860 1037 861 1038 /* CSC ITU */ 862 1039 cfg = fimc_read(ctx, EXYNOS_CIGCTRL); 863 1040 cfg &= ~EXYNOS_CIGCTRL_CSC_MASK; 864 1041 865 - if (sz->hsize >= FIMC_WIDTH_ITU_709) 1042 + if (buf->buf.width >= FIMC_WIDTH_ITU_709) 866 1043 cfg |= EXYNOS_CIGCTRL_CSC_ITU709; 867 1044 else 868 1045 cfg |= EXYNOS_CIGCTRL_CSC_ITU601; 869 1046 870 1047 fimc_write(ctx, cfg, EXYNOS_CIGCTRL); 871 1048 872 - if (swap) { 873 - img_pos.w = pos->h; 874 - img_pos.h = pos->w; 875 - img_sz.hsize = sz->vsize; 876 - img_sz.vsize = sz->hsize; 877 - } 1049 + cfg_ext = fimc_read(ctx, EXYNOS_CITRGFMT); 878 1050 879 1051 /* target image size */ 880 1052 cfg = fimc_read(ctx, EXYNOS_CITRGFMT); 881 1053 cfg &= ~(EXYNOS_CITRGFMT_TARGETH_MASK | 882 1054 EXYNOS_CITRGFMT_TARGETV_MASK); 883 - cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(img_pos.w) | 884 - EXYNOS_CITRGFMT_TARGETVSIZE(img_pos.h)); 1055 + if (cfg_ext & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) 1056 + cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(buf->rect.h) | 1057 + EXYNOS_CITRGFMT_TARGETVSIZE(buf->rect.w)); 1058 + else 1059 + cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(buf->rect.w) | 1060 + EXYNOS_CITRGFMT_TARGETVSIZE(buf->rect.h)); 885 1061 fimc_write(ctx, cfg, EXYNOS_CITRGFMT); 886 1062 887 1063 /* target area */ 888 - cfg = EXYNOS_CITAREA_TARGET_AREA(img_pos.w * img_pos.h); 1064 + cfg = EXYNOS_CITAREA_TARGET_AREA(buf->rect.w * buf->rect.h); 889 1065 fimc_write(ctx, cfg, EXYNOS_CITAREA); 890 1066 891 1067 /* offset Y(RGB), Cb, Cr */ 892 - cfg = (EXYNOS_CIOYOFF_HORIZONTAL(img_pos.x) | 893 - EXYNOS_CIOYOFF_VERTICAL(img_pos.y)); 1068 + cfg = (EXYNOS_CIOYOFF_HORIZONTAL(buf->rect.x) | 1069 + EXYNOS_CIOYOFF_VERTICAL(buf->rect.y)); 894 1070 fimc_write(ctx, cfg, EXYNOS_CIOYOFF); 895 - cfg = (EXYNOS_CIOCBOFF_HORIZONTAL(img_pos.x) | 896 - 
EXYNOS_CIOCBOFF_VERTICAL(img_pos.y)); 1071 + cfg = (EXYNOS_CIOCBOFF_HORIZONTAL(buf->rect.x) | 1072 + EXYNOS_CIOCBOFF_VERTICAL(buf->rect.y)); 897 1073 fimc_write(ctx, cfg, EXYNOS_CIOCBOFF); 898 - cfg = (EXYNOS_CIOCROFF_HORIZONTAL(img_pos.x) | 899 - EXYNOS_CIOCROFF_VERTICAL(img_pos.y)); 1074 + cfg = (EXYNOS_CIOCROFF_HORIZONTAL(buf->rect.x) | 1075 + EXYNOS_CIOCROFF_VERTICAL(buf->rect.y)); 900 1076 fimc_write(ctx, cfg, EXYNOS_CIOCROFF); 901 - 902 - return 0; 903 1077 } 904 1078 905 1079 static void fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id, 906 - enum drm_exynos_ipp_buf_type buf_type) 1080 + bool enqueue) 907 1081 { 908 1082 unsigned long flags; 909 1083 u32 buf_num; 910 1084 u32 cfg; 911 1085 912 - DRM_DEBUG_KMS("buf_id[%d]buf_type[%d]\n", buf_id, buf_type); 1086 + DRM_DEBUG_KMS("buf_id[%d]enqueu[%d]\n", buf_id, enqueue); 913 1087 914 1088 spin_lock_irqsave(&ctx->lock, flags); 915 1089 916 1090 cfg = fimc_read(ctx, EXYNOS_CIFCNTSEQ); 917 1091 918 - if (buf_type == IPP_BUF_ENQUEUE) 1092 + if (enqueue) 919 1093 cfg |= (1 << buf_id); 920 1094 else 921 1095 cfg &= ~(1 << buf_id); ··· 918 1104 919 1105 buf_num = hweight32(cfg); 920 1106 921 - if (buf_type == IPP_BUF_ENQUEUE && buf_num >= FIMC_BUF_START) 1107 + if (enqueue && buf_num >= FIMC_BUF_START) 922 1108 fimc_mask_irq(ctx, true); 923 - else if (buf_type == IPP_BUF_DEQUEUE && buf_num <= FIMC_BUF_STOP) 1109 + else if (!enqueue && buf_num <= FIMC_BUF_STOP) 924 1110 fimc_mask_irq(ctx, false); 925 1111 926 1112 spin_unlock_irqrestore(&ctx->lock, flags); 927 1113 } 928 1114 929 - static int fimc_dst_set_addr(struct device *dev, 930 - struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id, 931 - enum drm_exynos_ipp_buf_type buf_type) 1115 + static void fimc_dst_set_addr(struct fimc_context *ctx, 1116 + struct exynos_drm_ipp_buffer *buf) 932 1117 { 933 - struct fimc_context *ctx = get_fimc_context(dev); 934 - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 935 - struct drm_exynos_ipp_cmd_node *c_node = 
ippdrv->c_node; 936 - struct drm_exynos_ipp_property *property; 937 - struct drm_exynos_ipp_config *config; 1118 + fimc_write(ctx, buf->dma_addr[0], EXYNOS_CIOYSA(0)); 1119 + fimc_write(ctx, buf->dma_addr[1], EXYNOS_CIOCBSA(0)); 1120 + fimc_write(ctx, buf->dma_addr[2], EXYNOS_CIOCRSA(0)); 938 1121 939 - if (!c_node) { 940 - DRM_ERROR("failed to get c_node.\n"); 941 - return -EINVAL; 942 - } 943 - 944 - property = &c_node->property; 945 - 946 - DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]buf_type[%d]\n", 947 - property->prop_id, buf_id, buf_type); 948 - 949 - if (buf_id > FIMC_MAX_DST) { 950 - dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id); 951 - return -ENOMEM; 952 - } 953 - 954 - /* address register set */ 955 - switch (buf_type) { 956 - case IPP_BUF_ENQUEUE: 957 - config = &property->config[EXYNOS_DRM_OPS_DST]; 958 - 959 - fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_Y], 960 - EXYNOS_CIOYSA(buf_id)); 961 - 962 - if (config->fmt == DRM_FORMAT_YVU420) { 963 - fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR], 964 - EXYNOS_CIOCBSA(buf_id)); 965 - fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB], 966 - EXYNOS_CIOCRSA(buf_id)); 967 - } else { 968 - fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB], 969 - EXYNOS_CIOCBSA(buf_id)); 970 - fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR], 971 - EXYNOS_CIOCRSA(buf_id)); 972 - } 973 - break; 974 - case IPP_BUF_DEQUEUE: 975 - fimc_write(ctx, 0x0, EXYNOS_CIOYSA(buf_id)); 976 - fimc_write(ctx, 0x0, EXYNOS_CIOCBSA(buf_id)); 977 - fimc_write(ctx, 0x0, EXYNOS_CIOCRSA(buf_id)); 978 - break; 979 - default: 980 - /* bypass */ 981 - break; 982 - } 983 - 984 - fimc_dst_set_buf_seq(ctx, buf_id, buf_type); 985 - 986 - return 0; 1122 + fimc_dst_set_buf_seq(ctx, 0, true); 987 1123 } 988 1124 989 - static struct exynos_drm_ipp_ops fimc_dst_ops = { 990 - .set_fmt = fimc_dst_set_fmt, 991 - .set_transf = fimc_dst_set_transf, 992 - .set_size = fimc_dst_set_size, 993 - .set_addr = fimc_dst_set_addr, 994 - }; 1125 + static void 
fimc_stop(struct fimc_context *ctx); 995 1126 996 1127 static irqreturn_t fimc_irq_handler(int irq, void *dev_id) 997 1128 { 998 1129 struct fimc_context *ctx = dev_id; 999 - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 1000 - struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node; 1001 - struct drm_exynos_ipp_event_work *event_work = 1002 - c_node->event_work; 1003 1130 int buf_id; 1004 1131 1005 1132 DRM_DEBUG_KMS("fimc id[%d]\n", ctx->id); ··· 958 1203 959 1204 DRM_DEBUG_KMS("buf_id[%d]\n", buf_id); 960 1205 961 - fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE); 1206 + if (ctx->task) { 1207 + struct exynos_drm_ipp_task *task = ctx->task; 962 1208 963 - event_work->ippdrv = ippdrv; 964 - event_work->buf_id[EXYNOS_DRM_OPS_DST] = buf_id; 965 - queue_work(ippdrv->event_workq, &event_work->work); 1209 + ctx->task = NULL; 1210 + pm_runtime_mark_last_busy(ctx->dev); 1211 + pm_runtime_put_autosuspend(ctx->dev); 1212 + exynos_drm_ipp_task_done(task, 0); 1213 + } 1214 + 1215 + fimc_dst_set_buf_seq(ctx, buf_id, false); 1216 + fimc_stop(ctx); 966 1217 967 1218 return IRQ_HANDLED; 968 - } 969 - 970 - static int fimc_init_prop_list(struct exynos_drm_ippdrv *ippdrv) 971 - { 972 - struct drm_exynos_ipp_prop_list *prop_list = &ippdrv->prop_list; 973 - 974 - prop_list->version = 1; 975 - prop_list->writeback = 1; 976 - prop_list->refresh_min = FIMC_REFRESH_MIN; 977 - prop_list->refresh_max = FIMC_REFRESH_MAX; 978 - prop_list->flip = (1 << EXYNOS_DRM_FLIP_NONE) | 979 - (1 << EXYNOS_DRM_FLIP_VERTICAL) | 980 - (1 << EXYNOS_DRM_FLIP_HORIZONTAL); 981 - prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) | 982 - (1 << EXYNOS_DRM_DEGREE_90) | 983 - (1 << EXYNOS_DRM_DEGREE_180) | 984 - (1 << EXYNOS_DRM_DEGREE_270); 985 - prop_list->csc = 1; 986 - prop_list->crop = 1; 987 - prop_list->crop_max.hsize = FIMC_CROP_MAX; 988 - prop_list->crop_max.vsize = FIMC_CROP_MAX; 989 - prop_list->crop_min.hsize = FIMC_CROP_MIN; 990 - prop_list->crop_min.vsize = FIMC_CROP_MIN; 991 - prop_list->scale = 
1; 992 - prop_list->scale_max.hsize = FIMC_SCALE_MAX; 993 - prop_list->scale_max.vsize = FIMC_SCALE_MAX; 994 - prop_list->scale_min.hsize = FIMC_SCALE_MIN; 995 - prop_list->scale_min.vsize = FIMC_SCALE_MIN; 996 - 997 - return 0; 998 - } 999 - 1000 - static inline bool fimc_check_drm_flip(enum drm_exynos_flip flip) 1001 - { 1002 - switch (flip) { 1003 - case EXYNOS_DRM_FLIP_NONE: 1004 - case EXYNOS_DRM_FLIP_VERTICAL: 1005 - case EXYNOS_DRM_FLIP_HORIZONTAL: 1006 - case EXYNOS_DRM_FLIP_BOTH: 1007 - return true; 1008 - default: 1009 - DRM_DEBUG_KMS("invalid flip\n"); 1010 - return false; 1011 - } 1012 - } 1013 - 1014 - static int fimc_ippdrv_check_property(struct device *dev, 1015 - struct drm_exynos_ipp_property *property) 1016 - { 1017 - struct fimc_context *ctx = get_fimc_context(dev); 1018 - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 1019 - struct drm_exynos_ipp_prop_list *pp = &ippdrv->prop_list; 1020 - struct drm_exynos_ipp_config *config; 1021 - struct drm_exynos_pos *pos; 1022 - struct drm_exynos_sz *sz; 1023 - bool swap; 1024 - int i; 1025 - 1026 - for_each_ipp_ops(i) { 1027 - if ((i == EXYNOS_DRM_OPS_SRC) && 1028 - (property->cmd == IPP_CMD_WB)) 1029 - continue; 1030 - 1031 - config = &property->config[i]; 1032 - pos = &config->pos; 1033 - sz = &config->sz; 1034 - 1035 - /* check for flip */ 1036 - if (!fimc_check_drm_flip(config->flip)) { 1037 - DRM_ERROR("invalid flip.\n"); 1038 - goto err_property; 1039 - } 1040 - 1041 - /* check for degree */ 1042 - switch (config->degree) { 1043 - case EXYNOS_DRM_DEGREE_90: 1044 - case EXYNOS_DRM_DEGREE_270: 1045 - swap = true; 1046 - break; 1047 - case EXYNOS_DRM_DEGREE_0: 1048 - case EXYNOS_DRM_DEGREE_180: 1049 - swap = false; 1050 - break; 1051 - default: 1052 - DRM_ERROR("invalid degree.\n"); 1053 - goto err_property; 1054 - } 1055 - 1056 - /* check for buffer bound */ 1057 - if ((pos->x + pos->w > sz->hsize) || 1058 - (pos->y + pos->h > sz->vsize)) { 1059 - DRM_ERROR("out of buf bound.\n"); 1060 - goto 
err_property; 1061 - } 1062 - 1063 - /* check for crop */ 1064 - if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) { 1065 - if (swap) { 1066 - if ((pos->h < pp->crop_min.hsize) || 1067 - (sz->vsize > pp->crop_max.hsize) || 1068 - (pos->w < pp->crop_min.vsize) || 1069 - (sz->hsize > pp->crop_max.vsize)) { 1070 - DRM_ERROR("out of crop size.\n"); 1071 - goto err_property; 1072 - } 1073 - } else { 1074 - if ((pos->w < pp->crop_min.hsize) || 1075 - (sz->hsize > pp->crop_max.hsize) || 1076 - (pos->h < pp->crop_min.vsize) || 1077 - (sz->vsize > pp->crop_max.vsize)) { 1078 - DRM_ERROR("out of crop size.\n"); 1079 - goto err_property; 1080 - } 1081 - } 1082 - } 1083 - 1084 - /* check for scale */ 1085 - if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) { 1086 - if (swap) { 1087 - if ((pos->h < pp->scale_min.hsize) || 1088 - (sz->vsize > pp->scale_max.hsize) || 1089 - (pos->w < pp->scale_min.vsize) || 1090 - (sz->hsize > pp->scale_max.vsize)) { 1091 - DRM_ERROR("out of scale size.\n"); 1092 - goto err_property; 1093 - } 1094 - } else { 1095 - if ((pos->w < pp->scale_min.hsize) || 1096 - (sz->hsize > pp->scale_max.hsize) || 1097 - (pos->h < pp->scale_min.vsize) || 1098 - (sz->vsize > pp->scale_max.vsize)) { 1099 - DRM_ERROR("out of scale size.\n"); 1100 - goto err_property; 1101 - } 1102 - } 1103 - } 1104 - } 1105 - 1106 - return 0; 1107 - 1108 - err_property: 1109 - for_each_ipp_ops(i) { 1110 - if ((i == EXYNOS_DRM_OPS_SRC) && 1111 - (property->cmd == IPP_CMD_WB)) 1112 - continue; 1113 - 1114 - config = &property->config[i]; 1115 - pos = &config->pos; 1116 - sz = &config->sz; 1117 - 1118 - DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n", 1119 - i ? 
"dst" : "src", config->flip, config->degree, 1120 - pos->x, pos->y, pos->w, pos->h, 1121 - sz->hsize, sz->vsize); 1122 - } 1123 - 1124 - return -EINVAL; 1125 1219 } 1126 1220 1127 1221 static void fimc_clear_addr(struct fimc_context *ctx) ··· 990 1386 } 991 1387 } 992 1388 993 - static int fimc_ippdrv_reset(struct device *dev) 1389 + static void fimc_reset(struct fimc_context *ctx) 994 1390 { 995 - struct fimc_context *ctx = get_fimc_context(dev); 996 - 997 1391 /* reset h/w block */ 998 1392 fimc_sw_reset(ctx); 999 1393 ··· 999 1397 memset(&ctx->sc, 0x0, sizeof(ctx->sc)); 1000 1398 1001 1399 fimc_clear_addr(ctx); 1002 - 1003 - return 0; 1004 1400 } 1005 1401 1006 - static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd) 1402 + static void fimc_start(struct fimc_context *ctx) 1007 1403 { 1008 - struct fimc_context *ctx = get_fimc_context(dev); 1009 - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 1010 - struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node; 1011 - struct drm_exynos_ipp_property *property; 1012 - struct drm_exynos_ipp_config *config; 1013 - struct drm_exynos_pos img_pos[EXYNOS_DRM_OPS_MAX]; 1014 - struct drm_exynos_ipp_set_wb set_wb; 1015 - int ret, i; 1016 1404 u32 cfg0, cfg1; 1017 - 1018 - DRM_DEBUG_KMS("cmd[%d]\n", cmd); 1019 - 1020 - if (!c_node) { 1021 - DRM_ERROR("failed to get c_node.\n"); 1022 - return -EINVAL; 1023 - } 1024 - 1025 - property = &c_node->property; 1026 1405 1027 1406 fimc_mask_irq(ctx, true); 1028 1407 1029 - for_each_ipp_ops(i) { 1030 - config = &property->config[i]; 1031 - img_pos[i] = config->pos; 1032 - } 1033 - 1034 - ret = fimc_set_prescaler(ctx, &ctx->sc, 1035 - &img_pos[EXYNOS_DRM_OPS_SRC], 1036 - &img_pos[EXYNOS_DRM_OPS_DST]); 1037 - if (ret) { 1038 - dev_err(dev, "failed to set prescaler.\n"); 1039 - return ret; 1040 - } 1041 - 1042 - /* If set ture, we can save jpeg about screen */ 1408 + /* If set true, we can save jpeg about screen */ 1043 1409 fimc_handle_jpeg(ctx, false); 1044 1410 
fimc_set_scaler(ctx, &ctx->sc); 1045 1411 1046 - switch (cmd) { 1047 - case IPP_CMD_M2M: 1048 - fimc_set_type_ctrl(ctx, FIMC_WB_NONE); 1049 - fimc_handle_lastend(ctx, false); 1412 + fimc_set_type_ctrl(ctx); 1413 + fimc_handle_lastend(ctx, false); 1050 1414 1051 - /* setup dma */ 1052 - cfg0 = fimc_read(ctx, EXYNOS_MSCTRL); 1053 - cfg0 &= ~EXYNOS_MSCTRL_INPUT_MASK; 1054 - cfg0 |= EXYNOS_MSCTRL_INPUT_MEMORY; 1055 - fimc_write(ctx, cfg0, EXYNOS_MSCTRL); 1056 - break; 1057 - case IPP_CMD_WB: 1058 - fimc_set_type_ctrl(ctx, FIMC_WB_A); 1059 - fimc_handle_lastend(ctx, true); 1060 - 1061 - /* setup FIMD */ 1062 - ret = fimc_set_camblk_fimd0_wb(ctx); 1063 - if (ret < 0) { 1064 - dev_err(dev, "camblk setup failed.\n"); 1065 - return ret; 1066 - } 1067 - 1068 - set_wb.enable = 1; 1069 - set_wb.refresh = property->refresh_rate; 1070 - exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb); 1071 - break; 1072 - case IPP_CMD_OUTPUT: 1073 - default: 1074 - ret = -EINVAL; 1075 - dev_err(dev, "invalid operations.\n"); 1076 - return ret; 1077 - } 1415 + /* setup dma */ 1416 + cfg0 = fimc_read(ctx, EXYNOS_MSCTRL); 1417 + cfg0 &= ~EXYNOS_MSCTRL_INPUT_MASK; 1418 + cfg0 |= EXYNOS_MSCTRL_INPUT_MEMORY; 1419 + fimc_write(ctx, cfg0, EXYNOS_MSCTRL); 1078 1420 1079 1421 /* Reset status */ 1080 1422 fimc_write(ctx, 0x0, EXYNOS_CISTATUS); ··· 1044 1498 1045 1499 fimc_clear_bits(ctx, EXYNOS_CIOCTRL, EXYNOS_CIOCTRL_WEAVE_MASK); 1046 1500 1047 - if (cmd == IPP_CMD_M2M) 1048 - fimc_set_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID); 1049 - 1050 - return 0; 1501 + fimc_set_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID); 1051 1502 } 1052 1503 1053 - static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd) 1504 + static void fimc_stop(struct fimc_context *ctx) 1054 1505 { 1055 - struct fimc_context *ctx = get_fimc_context(dev); 1056 - struct drm_exynos_ipp_set_wb set_wb = {0, 0}; 1057 1506 u32 cfg; 1058 1507 1059 - DRM_DEBUG_KMS("cmd[%d]\n", cmd); 1060 - 1061 - switch (cmd) 
{ 1062 - case IPP_CMD_M2M: 1063 - /* Source clear */ 1064 - cfg = fimc_read(ctx, EXYNOS_MSCTRL); 1065 - cfg &= ~EXYNOS_MSCTRL_INPUT_MASK; 1066 - cfg &= ~EXYNOS_MSCTRL_ENVID; 1067 - fimc_write(ctx, cfg, EXYNOS_MSCTRL); 1068 - break; 1069 - case IPP_CMD_WB: 1070 - exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb); 1071 - break; 1072 - case IPP_CMD_OUTPUT: 1073 - default: 1074 - dev_err(dev, "invalid operations.\n"); 1075 - break; 1076 - } 1508 + /* Source clear */ 1509 + cfg = fimc_read(ctx, EXYNOS_MSCTRL); 1510 + cfg &= ~EXYNOS_MSCTRL_INPUT_MASK; 1511 + cfg &= ~EXYNOS_MSCTRL_ENVID; 1512 + fimc_write(ctx, cfg, EXYNOS_MSCTRL); 1077 1513 1078 1514 fimc_mask_irq(ctx, false); 1079 1515 ··· 1073 1545 fimc_set_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_IRQ_END_DISABLE); 1074 1546 } 1075 1547 1548 + static int fimc_commit(struct exynos_drm_ipp *ipp, 1549 + struct exynos_drm_ipp_task *task) 1550 + { 1551 + struct fimc_context *ctx = 1552 + container_of(ipp, struct fimc_context, ipp); 1553 + 1554 + pm_runtime_get_sync(ctx->dev); 1555 + ctx->task = task; 1556 + 1557 + fimc_src_set_fmt(ctx, task->src.buf.fourcc, task->src.buf.modifier); 1558 + fimc_src_set_size(ctx, &task->src); 1559 + fimc_src_set_transf(ctx, DRM_MODE_ROTATE_0); 1560 + fimc_src_set_addr(ctx, &task->src); 1561 + fimc_dst_set_fmt(ctx, task->dst.buf.fourcc, task->dst.buf.modifier); 1562 + fimc_dst_set_transf(ctx, task->transform.rotation); 1563 + fimc_dst_set_size(ctx, &task->dst); 1564 + fimc_dst_set_addr(ctx, &task->dst); 1565 + fimc_set_prescaler(ctx, &ctx->sc, &task->src.rect, &task->dst.rect); 1566 + fimc_start(ctx); 1567 + 1568 + return 0; 1569 + } 1570 + 1571 + static void fimc_abort(struct exynos_drm_ipp *ipp, 1572 + struct exynos_drm_ipp_task *task) 1573 + { 1574 + struct fimc_context *ctx = 1575 + container_of(ipp, struct fimc_context, ipp); 1576 + 1577 + fimc_reset(ctx); 1578 + 1579 + if (ctx->task) { 1580 + struct exynos_drm_ipp_task *task = ctx->task; 1581 + 1582 + ctx->task = NULL; 1583 
+ pm_runtime_mark_last_busy(ctx->dev); 1584 + pm_runtime_put_autosuspend(ctx->dev); 1585 + exynos_drm_ipp_task_done(task, -EIO); 1586 + } 1587 + } 1588 + 1589 + static struct exynos_drm_ipp_funcs ipp_funcs = { 1590 + .commit = fimc_commit, 1591 + .abort = fimc_abort, 1592 + }; 1593 + 1594 + static int fimc_bind(struct device *dev, struct device *master, void *data) 1595 + { 1596 + struct fimc_context *ctx = dev_get_drvdata(dev); 1597 + struct drm_device *drm_dev = data; 1598 + struct exynos_drm_ipp *ipp = &ctx->ipp; 1599 + 1600 + ctx->drm_dev = drm_dev; 1601 + drm_iommu_attach_device(drm_dev, dev); 1602 + 1603 + exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs, 1604 + DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE | 1605 + DRM_EXYNOS_IPP_CAP_SCALE | DRM_EXYNOS_IPP_CAP_CONVERT, 1606 + ctx->formats, ctx->num_formats, "fimc"); 1607 + 1608 + dev_info(dev, "The exynos fimc has been probed successfully\n"); 1609 + 1610 + return 0; 1611 + } 1612 + 1613 + static void fimc_unbind(struct device *dev, struct device *master, 1614 + void *data) 1615 + { 1616 + struct fimc_context *ctx = dev_get_drvdata(dev); 1617 + struct drm_device *drm_dev = data; 1618 + struct exynos_drm_ipp *ipp = &ctx->ipp; 1619 + 1620 + exynos_drm_ipp_unregister(drm_dev, ipp); 1621 + drm_iommu_detach_device(drm_dev, dev); 1622 + } 1623 + 1624 + static const struct component_ops fimc_component_ops = { 1625 + .bind = fimc_bind, 1626 + .unbind = fimc_unbind, 1627 + }; 1628 + 1076 1629 static void fimc_put_clocks(struct fimc_context *ctx) 1077 1630 { 1078 1631 int i; ··· 1168 1559 1169 1560 static int fimc_setup_clocks(struct fimc_context *ctx) 1170 1561 { 1171 - struct device *fimc_dev = ctx->ippdrv.dev; 1562 + struct device *fimc_dev = ctx->dev; 1172 1563 struct device *dev; 1173 1564 int ret, i; 1174 1565 ··· 1183 1574 1184 1575 ctx->clocks[i] = clk_get(dev, fimc_clock_names[i]); 1185 1576 if (IS_ERR(ctx->clocks[i])) { 1186 - if (i >= FIMC_CLK_MUX) 1187 - break; 1188 1577 ret = 
PTR_ERR(ctx->clocks[i]); 1189 1578 dev_err(fimc_dev, "failed to get clock: %s\n", 1190 1579 fimc_clock_names[i]); 1191 1580 goto e_clk_free; 1192 1581 } 1193 1582 } 1194 - 1195 - /* Optional FIMC LCLK parent clock setting */ 1196 - if (!IS_ERR(ctx->clocks[FIMC_CLK_PARENT])) { 1197 - ret = clk_set_parent(ctx->clocks[FIMC_CLK_MUX], 1198 - ctx->clocks[FIMC_CLK_PARENT]); 1199 - if (ret < 0) { 1200 - dev_err(fimc_dev, "failed to set parent.\n"); 1201 - goto e_clk_free; 1202 - } 1203 - } 1204 - 1205 - ret = clk_set_rate(ctx->clocks[FIMC_CLK_LCLK], ctx->clk_frequency); 1206 - if (ret < 0) 1207 - goto e_clk_free; 1208 1583 1209 1584 ret = clk_prepare_enable(ctx->clocks[FIMC_CLK_LCLK]); 1210 1585 if (!ret) ··· 1198 1605 return ret; 1199 1606 } 1200 1607 1201 - static int fimc_parse_dt(struct fimc_context *ctx) 1608 + int exynos_drm_check_fimc_device(struct device *dev) 1202 1609 { 1203 - struct device_node *node = ctx->ippdrv.dev->of_node; 1610 + unsigned int id = of_alias_get_id(dev->of_node, "fimc"); 1204 1611 1205 - /* Handle only devices that support the LCD Writeback data path */ 1206 - if (!of_property_read_bool(node, "samsung,lcd-wb")) 1207 - return -ENODEV; 1208 - 1209 - if (of_property_read_u32(node, "clock-frequency", 1210 - &ctx->clk_frequency)) 1211 - ctx->clk_frequency = FIMC_DEFAULT_LCLK_FREQUENCY; 1212 - 1213 - ctx->id = of_alias_get_id(node, "fimc"); 1214 - 1215 - if (ctx->id < 0) { 1216 - dev_err(ctx->ippdrv.dev, "failed to get node alias id.\n"); 1217 - return -EINVAL; 1218 - } 1219 - 1220 - return 0; 1612 + if (id >= 0 && (BIT(id) & fimc_mask)) 1613 + return 0; 1614 + return -ENODEV; 1221 1615 } 1616 + 1617 + static const unsigned int fimc_formats[] = { 1618 + DRM_FORMAT_XRGB8888, DRM_FORMAT_RGB565, 1619 + DRM_FORMAT_NV12, DRM_FORMAT_NV16, DRM_FORMAT_NV21, DRM_FORMAT_NV61, 1620 + DRM_FORMAT_UYVY, DRM_FORMAT_VYUY, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, 1621 + DRM_FORMAT_YUV420, DRM_FORMAT_YVU420, DRM_FORMAT_YUV422, 1622 + DRM_FORMAT_YUV444, 1623 + }; 1624 + 
1625 + static const unsigned int fimc_tiled_formats[] = { 1626 + DRM_FORMAT_NV12, DRM_FORMAT_NV21, 1627 + }; 1628 + 1629 + static const struct drm_exynos_ipp_limit fimc_4210_limits_v1[] = { 1630 + { IPP_SIZE_LIMIT(BUFFER, .h = { 16, 8192, 8 }, .v = { 16, 8192, 2 }) }, 1631 + { IPP_SIZE_LIMIT(AREA, .h = { 16, 4224, 2 }, .v = { 16, 0, 2 }) }, 1632 + { IPP_SIZE_LIMIT(ROTATED, .h = { 128, 1920 }, .v = { 128, 0 }) }, 1633 + { IPP_SCALE_LIMIT(.h = { (1 << 16) / 64, (1 << 16) * 64 }, 1634 + .v = { (1 << 16) / 64, (1 << 16) * 64 }) }, 1635 + }; 1636 + 1637 + static const struct drm_exynos_ipp_limit fimc_4210_limits_v2[] = { 1638 + { IPP_SIZE_LIMIT(BUFFER, .h = { 16, 8192, 8 }, .v = { 16, 8192, 2 }) }, 1639 + { IPP_SIZE_LIMIT(AREA, .h = { 16, 1920, 2 }, .v = { 16, 0, 2 }) }, 1640 + { IPP_SIZE_LIMIT(ROTATED, .h = { 128, 1366 }, .v = { 128, 0 }) }, 1641 + { IPP_SCALE_LIMIT(.h = { (1 << 16) / 64, (1 << 16) * 64 }, 1642 + .v = { (1 << 16) / 64, (1 << 16) * 64 }) }, 1643 + }; 1644 + 1645 + static const struct drm_exynos_ipp_limit fimc_4210_limits_tiled_v1[] = { 1646 + { IPP_SIZE_LIMIT(BUFFER, .h = { 128, 1920, 128 }, .v = { 32, 1920, 32 }) }, 1647 + { IPP_SIZE_LIMIT(AREA, .h = { 128, 1920, 2 }, .v = { 128, 0, 2 }) }, 1648 + { IPP_SCALE_LIMIT(.h = { (1 << 16) / 64, (1 << 16) * 64 }, 1649 + .v = { (1 << 16) / 64, (1 << 16) * 64 }) }, 1650 + }; 1651 + 1652 + static const struct drm_exynos_ipp_limit fimc_4210_limits_tiled_v2[] = { 1653 + { IPP_SIZE_LIMIT(BUFFER, .h = { 128, 1920, 128 }, .v = { 32, 1920, 32 }) }, 1654 + { IPP_SIZE_LIMIT(AREA, .h = { 128, 1366, 2 }, .v = { 128, 0, 2 }) }, 1655 + { IPP_SCALE_LIMIT(.h = { (1 << 16) / 64, (1 << 16) * 64 }, 1656 + .v = { (1 << 16) / 64, (1 << 16) * 64 }) }, 1657 + }; 1222 1658 1223 1659 static int fimc_probe(struct platform_device *pdev) 1224 1660 { 1661 + const struct drm_exynos_ipp_limit *limits; 1662 + struct exynos_drm_ipp_formats *formats; 1225 1663 struct device *dev = &pdev->dev; 1226 1664 struct fimc_context *ctx; 1227 1665 struct 
resource *res; 1228 - struct exynos_drm_ippdrv *ippdrv; 1229 1666 int ret; 1667 + int i, j, num_limits, num_formats; 1230 1668 1231 - if (!dev->of_node) { 1232 - dev_err(dev, "device tree node not found.\n"); 1669 + if (exynos_drm_check_fimc_device(dev) != 0) 1233 1670 return -ENODEV; 1234 - } 1235 1671 1236 1672 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); 1237 1673 if (!ctx) 1238 1674 return -ENOMEM; 1239 1675 1240 - ctx->ippdrv.dev = dev; 1676 + ctx->dev = dev; 1677 + ctx->id = of_alias_get_id(dev->of_node, "fimc"); 1241 1678 1242 - ret = fimc_parse_dt(ctx); 1243 - if (ret < 0) 1244 - return ret; 1679 + /* construct formats/limits array */ 1680 + num_formats = ARRAY_SIZE(fimc_formats) + ARRAY_SIZE(fimc_tiled_formats); 1681 + formats = devm_kzalloc(dev, sizeof(*formats) * num_formats, GFP_KERNEL); 1682 + if (!formats) 1683 + return -ENOMEM; 1245 1684 1246 - ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node, 1247 - "samsung,sysreg"); 1248 - if (IS_ERR(ctx->sysreg)) { 1249 - dev_err(dev, "syscon regmap lookup failed.\n"); 1250 - return PTR_ERR(ctx->sysreg); 1685 + /* linear formats */ 1686 + if (ctx->id < 3) { 1687 + limits = fimc_4210_limits_v1; 1688 + num_limits = ARRAY_SIZE(fimc_4210_limits_v1); 1689 + } else { 1690 + limits = fimc_4210_limits_v2; 1691 + num_limits = ARRAY_SIZE(fimc_4210_limits_v2); 1251 1692 } 1693 + for (i = 0; i < ARRAY_SIZE(fimc_formats); i++) { 1694 + formats[i].fourcc = fimc_formats[i]; 1695 + formats[i].type = DRM_EXYNOS_IPP_FORMAT_SOURCE | 1696 + DRM_EXYNOS_IPP_FORMAT_DESTINATION; 1697 + formats[i].limits = limits; 1698 + formats[i].num_limits = num_limits; 1699 + } 1700 + 1701 + /* tiled formats */ 1702 + if (ctx->id < 3) { 1703 + limits = fimc_4210_limits_tiled_v1; 1704 + num_limits = ARRAY_SIZE(fimc_4210_limits_tiled_v1); 1705 + } else { 1706 + limits = fimc_4210_limits_tiled_v2; 1707 + num_limits = ARRAY_SIZE(fimc_4210_limits_tiled_v2); 1708 + } 1709 + for (j = i, i = 0; i < ARRAY_SIZE(fimc_tiled_formats); j++, i++) { 
1710 + formats[j].fourcc = fimc_tiled_formats[i]; 1711 + formats[j].modifier = DRM_FORMAT_MOD_SAMSUNG_64_32_TILE; 1712 + formats[j].type = DRM_EXYNOS_IPP_FORMAT_SOURCE | 1713 + DRM_EXYNOS_IPP_FORMAT_DESTINATION; 1714 + formats[j].limits = limits; 1715 + formats[j].num_limits = num_limits; 1716 + } 1717 + 1718 + ctx->formats = formats; 1719 + ctx->num_formats = num_formats; 1252 1720 1253 1721 /* resource memory */ 1254 1722 ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ··· 1324 1670 return -ENOENT; 1325 1671 } 1326 1672 1327 - ctx->irq = res->start; 1328 - ret = devm_request_threaded_irq(dev, ctx->irq, NULL, fimc_irq_handler, 1329 - IRQF_ONESHOT, "drm_fimc", ctx); 1673 + ret = devm_request_irq(dev, res->start, fimc_irq_handler, 1674 + 0, dev_name(dev), ctx); 1330 1675 if (ret < 0) { 1331 1676 dev_err(dev, "failed to request irq.\n"); 1332 1677 return ret; ··· 1335 1682 if (ret < 0) 1336 1683 return ret; 1337 1684 1338 - ippdrv = &ctx->ippdrv; 1339 - ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &fimc_src_ops; 1340 - ippdrv->ops[EXYNOS_DRM_OPS_DST] = &fimc_dst_ops; 1341 - ippdrv->check_property = fimc_ippdrv_check_property; 1342 - ippdrv->reset = fimc_ippdrv_reset; 1343 - ippdrv->start = fimc_ippdrv_start; 1344 - ippdrv->stop = fimc_ippdrv_stop; 1345 - ret = fimc_init_prop_list(ippdrv); 1346 - if (ret < 0) { 1347 - dev_err(dev, "failed to init property list.\n"); 1348 - goto err_put_clk; 1349 - } 1350 - 1351 - DRM_DEBUG_KMS("id[%d]ippdrv[%pK]\n", ctx->id, ippdrv); 1352 - 1353 1685 spin_lock_init(&ctx->lock); 1354 1686 platform_set_drvdata(pdev, ctx); 1355 1687 1688 + pm_runtime_use_autosuspend(dev); 1689 + pm_runtime_set_autosuspend_delay(dev, FIMC_AUTOSUSPEND_DELAY); 1356 1690 pm_runtime_enable(dev); 1357 1691 1358 - ret = exynos_drm_ippdrv_register(ippdrv); 1359 - if (ret < 0) { 1360 - dev_err(dev, "failed to register drm fimc device.\n"); 1692 + ret = component_add(dev, &fimc_component_ops); 1693 + if (ret) 1361 1694 goto err_pm_dis; 1362 - } 1363 1695 1364 
1696 dev_info(dev, "drm fimc registered successfully.\n"); 1365 1697 1366 1698 return 0; 1367 1699 1368 1700 err_pm_dis: 1701 + pm_runtime_dont_use_autosuspend(dev); 1369 1702 pm_runtime_disable(dev); 1370 - err_put_clk: 1371 1703 fimc_put_clocks(ctx); 1372 1704 1373 1705 return ret; ··· 1362 1724 { 1363 1725 struct device *dev = &pdev->dev; 1364 1726 struct fimc_context *ctx = get_fimc_context(dev); 1365 - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 1366 1727 1367 - exynos_drm_ippdrv_unregister(ippdrv); 1728 + component_del(dev, &fimc_component_ops); 1729 + pm_runtime_dont_use_autosuspend(dev); 1730 + pm_runtime_disable(dev); 1368 1731 1369 1732 fimc_put_clocks(ctx); 1370 - pm_runtime_set_suspended(dev); 1371 - pm_runtime_disable(dev); 1372 1733 1373 1734 return 0; 1374 1735 } 1375 1736 1376 1737 #ifdef CONFIG_PM 1377 - static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable) 1378 - { 1379 - DRM_DEBUG_KMS("enable[%d]\n", enable); 1380 - 1381 - if (enable) { 1382 - clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]); 1383 - clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]); 1384 - ctx->suspended = false; 1385 - } else { 1386 - clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]); 1387 - clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]); 1388 - ctx->suspended = true; 1389 - } 1390 - 1391 - return 0; 1392 - } 1393 - 1394 1738 static int fimc_runtime_suspend(struct device *dev) 1395 1739 { 1396 1740 struct fimc_context *ctx = get_fimc_context(dev); 1397 1741 1398 1742 DRM_DEBUG_KMS("id[%d]\n", ctx->id); 1399 - 1400 - return fimc_clk_ctrl(ctx, false); 1743 + clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]); 1744 + return 0; 1401 1745 } 1402 1746 1403 1747 static int fimc_runtime_resume(struct device *dev) ··· 1387 1767 struct fimc_context *ctx = get_fimc_context(dev); 1388 1768 1389 1769 DRM_DEBUG_KMS("id[%d]\n", ctx->id); 1390 - 1391 - return fimc_clk_ctrl(ctx, true); 1770 + return clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]); 1392 1771 } 1393 1772 #endif 1394 
1773 ··· 1414 1795 .pm = &fimc_pm_ops, 1415 1796 }, 1416 1797 }; 1417 -
-23
drivers/gpu/drm/exynos/exynos_drm_fimc.h
··· 1 - /* 2 - * Copyright (c) 2012 Samsung Electronics Co., Ltd. 3 - * 4 - * Authors: 5 - * Eunchul Kim <chulspro.kim@samsung.com> 6 - * Jinyoung Jeon <jy0.jeon@samsung.com> 7 - * Sangmin Lee <lsmin.lee@samsung.com> 8 - * 9 - * This program is free software; you can redistribute it and/or modify it 10 - * under the terms of the GNU General Public License as published by the 11 - * Free Software Foundation; either version 2 of the License, or (at your 12 - * option) any later version. 13 - */ 14 - 15 - #ifndef _EXYNOS_DRM_FIMC_H_ 16 - #define _EXYNOS_DRM_FIMC_H_ 17 - 18 - /* 19 - * TODO 20 - * FIMD output interface notifier callback. 21 - */ 22 - 23 - #endif /* _EXYNOS_DRM_FIMC_H_ */
+8
drivers/gpu/drm/exynos/exynos_drm_fimd.c
··· 121 121 .has_limited_fmt = 1, 122 122 }; 123 123 124 + static struct fimd_driver_data s5pv210_fimd_driver_data = { 125 + .timing_base = 0x0, 126 + .has_shadowcon = 1, 127 + .has_clksel = 1, 128 + }; 129 + 124 130 static struct fimd_driver_data exynos3_fimd_driver_data = { 125 131 .timing_base = 0x20000, 126 132 .lcdblk_offset = 0x210, ··· 199 193 static const struct of_device_id fimd_driver_dt_match[] = { 200 194 { .compatible = "samsung,s3c6400-fimd", 201 195 .data = &s3c64xx_fimd_driver_data }, 196 + { .compatible = "samsung,s5pv210-fimd", 197 + .data = &s5pv210_fimd_driver_data }, 202 198 { .compatible = "samsung,exynos3250-fimd", 203 199 .data = &exynos3_fimd_driver_data }, 204 200 { .compatible = "samsung,exynos4210-fimd",
+4 -17
drivers/gpu/drm/exynos/exynos_drm_gem.c
··· 431 431 return 0; 432 432 } 433 433 434 - int exynos_drm_gem_fault(struct vm_fault *vmf) 434 + vm_fault_t exynos_drm_gem_fault(struct vm_fault *vmf) 435 435 { 436 436 struct vm_area_struct *vma = vmf->vma; 437 437 struct drm_gem_object *obj = vma->vm_private_data; 438 438 struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj); 439 439 unsigned long pfn; 440 440 pgoff_t page_offset; 441 - int ret; 442 441 443 442 page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT; 444 443 445 444 if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) { 446 445 DRM_ERROR("invalid page offset\n"); 447 - ret = -EINVAL; 448 - goto out; 446 + return VM_FAULT_SIGBUS; 449 447 } 450 448 451 449 pfn = page_to_pfn(exynos_gem->pages[page_offset]); 452 - ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV)); 453 - 454 - out: 455 - switch (ret) { 456 - case 0: 457 - case -ERESTARTSYS: 458 - case -EINTR: 459 - return VM_FAULT_NOPAGE; 460 - case -ENOMEM: 461 - return VM_FAULT_OOM; 462 - default: 463 - return VM_FAULT_SIGBUS; 464 - } 450 + return vmf_insert_mixed(vma, vmf->address, 451 + __pfn_to_pfn_t(pfn, PFN_DEV)); 465 452 } 466 453 467 454 static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj,
+2 -1
drivers/gpu/drm/exynos/exynos_drm_gem.h
··· 13 13 #define _EXYNOS_DRM_GEM_H_ 14 14 15 15 #include <drm/drm_gem.h> 16 + #include <linux/mm_types.h> 16 17 17 18 #define to_exynos_gem(x) container_of(x, struct exynos_drm_gem, base) 18 19 ··· 112 111 struct drm_mode_create_dumb *args); 113 112 114 113 /* page fault handler and mmap fault address(virtual) to physical memory. */ 115 - int exynos_drm_gem_fault(struct vm_fault *vmf); 114 + vm_fault_t exynos_drm_gem_fault(struct vm_fault *vmf); 116 115 117 116 /* set vm_flags and we can change the vm attribute to other one at here. */ 118 117 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+339 -744
drivers/gpu/drm/exynos/exynos_drm_gsc.c
··· 12 12 * 13 13 */ 14 14 #include <linux/kernel.h> 15 + #include <linux/component.h> 15 16 #include <linux/platform_device.h> 16 17 #include <linux/clk.h> 17 18 #include <linux/pm_runtime.h> 18 19 #include <linux/mfd/syscon.h> 20 + #include <linux/of_device.h> 19 21 #include <linux/regmap.h> 20 22 21 23 #include <drm/drmP.h> 22 24 #include <drm/exynos_drm.h> 23 25 #include "regs-gsc.h" 24 26 #include "exynos_drm_drv.h" 27 + #include "exynos_drm_iommu.h" 25 28 #include "exynos_drm_ipp.h" 26 - #include "exynos_drm_gsc.h" 27 29 28 30 /* 29 31 * GSC stands for General SCaler and ··· 33 31 * input DMA reads image data from the memory. 34 32 * output DMA writes image data to memory. 35 33 * GSC supports image rotation and image effect functions. 36 - * 37 - * M2M operation : supports crop/scale/rotation/csc so on. 38 - * Memory ----> GSC H/W ----> Memory. 39 - * Writeback operation : supports cloned screen with FIMD. 40 - * FIMD ----> GSC H/W ----> Memory. 41 - * Output operation : supports direct display using local path. 42 - * Memory ----> GSC H/W ----> FIMD, Mixer. 43 34 */ 44 35 45 - /* 46 - * TODO 47 - * 1. check suspend/resume api if needed. 48 - * 2. need to check use case platform_device_id. 49 - * 3. check src/dst size with, height. 50 - * 4. added check_prepare api for right register. 51 - * 5. need to add supported list in prop_list. 52 - * 6. check prescaler/scaler optimization. 
53 - */ 54 36 55 - #define GSC_MAX_DEVS 4 37 + #define GSC_MAX_CLOCKS 8 56 38 #define GSC_MAX_SRC 4 57 39 #define GSC_MAX_DST 16 58 40 #define GSC_RESET_TIMEOUT 50 ··· 51 65 #define GSC_SC_DOWN_RATIO_4_8 131072 52 66 #define GSC_SC_DOWN_RATIO_3_8 174762 53 67 #define GSC_SC_DOWN_RATIO_2_8 262144 54 - #define GSC_REFRESH_MIN 12 55 - #define GSC_REFRESH_MAX 60 56 68 #define GSC_CROP_MAX 8192 57 69 #define GSC_CROP_MIN 32 58 70 #define GSC_SCALE_MAX 4224 ··· 61 77 #define GSC_COEF_H_8T 8 62 78 #define GSC_COEF_V_4T 4 63 79 #define GSC_COEF_DEPTH 3 80 + #define GSC_AUTOSUSPEND_DELAY 2000 64 81 65 82 #define get_gsc_context(dev) platform_get_drvdata(to_platform_device(dev)) 66 - #define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\ 67 - struct gsc_context, ippdrv); 68 83 #define gsc_read(offset) readl(ctx->regs + (offset)) 69 84 #define gsc_write(cfg, offset) writel(cfg, ctx->regs + (offset)) 70 85 ··· 87 104 }; 88 105 89 106 /* 90 - * A structure of scaler capability. 91 - * 92 - * find user manual 49.2 features. 93 - * @tile_w: tile mode or rotation width. 94 - * @tile_h: tile mode or rotation height. 95 - * @w: other cases width. 96 - * @h: other cases height. 97 - */ 98 - struct gsc_capability { 99 - /* tile or rotation */ 100 - u32 tile_w; 101 - u32 tile_h; 102 - /* other cases */ 103 - u32 w; 104 - u32 h; 105 - }; 106 - 107 - /* 108 107 * A structure of gsc context. 109 108 * 110 - * @ippdrv: prepare initialization using ippdrv. 111 109 * @regs_res: register resources. 112 110 * @regs: memory mapped io registers. 113 - * @sysreg: handle to SYSREG block regmap. 114 - * @lock: locking of operations. 115 111 * @gsc_clk: gsc gate clock. 116 112 * @sc: scaler infomations. 117 113 * @id: gsc id. 118 114 * @irq: irq number. 119 115 * @rotation: supports rotation of src. 120 - * @suspended: qos operations. 
121 116 */ 122 117 struct gsc_context { 123 - struct exynos_drm_ippdrv ippdrv; 118 + struct exynos_drm_ipp ipp; 119 + struct drm_device *drm_dev; 120 + struct device *dev; 121 + struct exynos_drm_ipp_task *task; 122 + struct exynos_drm_ipp_formats *formats; 123 + unsigned int num_formats; 124 + 124 125 struct resource *regs_res; 125 126 void __iomem *regs; 126 - struct regmap *sysreg; 127 - struct mutex lock; 128 - struct clk *gsc_clk; 127 + const char **clk_names; 128 + struct clk *clocks[GSC_MAX_CLOCKS]; 129 + int num_clocks; 129 130 struct gsc_scaler sc; 130 131 int id; 131 132 int irq; 132 133 bool rotation; 133 - bool suspended; 134 + }; 135 + 136 + /** 137 + * struct gsc_driverdata - per device type driver data for init time. 138 + * 139 + * @limits: picture size limits array 140 + * @clk_names: names of clocks needed by this variant 141 + * @num_clocks: the number of clocks needed by this variant 142 + */ 143 + struct gsc_driverdata { 144 + const struct drm_exynos_ipp_limit *limits; 145 + int num_limits; 146 + const char *clk_names[GSC_MAX_CLOCKS]; 147 + int num_clocks; 134 148 }; 135 149 136 150 /* 8-tap Filter Coefficient */ ··· 418 438 return 0; 419 439 } 420 440 421 - static void gsc_set_gscblk_fimd_wb(struct gsc_context *ctx, bool enable) 422 - { 423 - unsigned int gscblk_cfg; 424 - 425 - if (!ctx->sysreg) 426 - return; 427 - 428 - regmap_read(ctx->sysreg, SYSREG_GSCBLK_CFG1, &gscblk_cfg); 429 - 430 - if (enable) 431 - gscblk_cfg |= GSC_BLK_DISP1WB_DEST(ctx->id) | 432 - GSC_BLK_GSCL_WB_IN_SRC_SEL(ctx->id) | 433 - GSC_BLK_SW_RESET_WB_DEST(ctx->id); 434 - else 435 - gscblk_cfg |= GSC_BLK_PXLASYNC_LO_MASK_WB(ctx->id); 436 - 437 - regmap_write(ctx->sysreg, SYSREG_GSCBLK_CFG1, gscblk_cfg); 438 - } 439 - 440 441 static void gsc_handle_irq(struct gsc_context *ctx, bool enable, 441 442 bool overflow, bool done) 442 443 { ··· 448 487 } 449 488 450 489 451 - static int gsc_src_set_fmt(struct device *dev, u32 fmt) 490 + static void gsc_src_set_fmt(struct 
gsc_context *ctx, u32 fmt) 452 491 { 453 - struct gsc_context *ctx = get_gsc_context(dev); 454 - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 455 492 u32 cfg; 456 493 457 494 DRM_DEBUG_KMS("fmt[0x%x]\n", fmt); ··· 465 506 cfg |= GSC_IN_RGB565; 466 507 break; 467 508 case DRM_FORMAT_XRGB8888: 509 + case DRM_FORMAT_ARGB8888: 468 510 cfg |= GSC_IN_XRGB8888; 469 511 break; 470 512 case DRM_FORMAT_BGRX8888: ··· 508 548 cfg |= (GSC_IN_CHROMA_ORDER_CBCR | 509 549 GSC_IN_YUV420_2P); 510 550 break; 511 - default: 512 - dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt); 513 - return -EINVAL; 514 551 } 515 552 516 553 gsc_write(cfg, GSC_IN_CON); 517 - 518 - return 0; 519 554 } 520 555 521 - static int gsc_src_set_transf(struct device *dev, 522 - enum drm_exynos_degree degree, 523 - enum drm_exynos_flip flip, bool *swap) 556 + static void gsc_src_set_transf(struct gsc_context *ctx, unsigned int rotation) 524 557 { 525 - struct gsc_context *ctx = get_gsc_context(dev); 526 - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 558 + unsigned int degree = rotation & DRM_MODE_ROTATE_MASK; 527 559 u32 cfg; 528 - 529 - DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip); 530 560 531 561 cfg = gsc_read(GSC_IN_CON); 532 562 cfg &= ~GSC_IN_ROT_MASK; 533 563 534 564 switch (degree) { 535 - case EXYNOS_DRM_DEGREE_0: 536 - if (flip & EXYNOS_DRM_FLIP_VERTICAL) 565 + case DRM_MODE_ROTATE_0: 566 + if (rotation & DRM_MODE_REFLECT_Y) 537 567 cfg |= GSC_IN_ROT_XFLIP; 538 - if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) 568 + if (rotation & DRM_MODE_REFLECT_X) 539 569 cfg |= GSC_IN_ROT_YFLIP; 540 570 break; 541 - case EXYNOS_DRM_DEGREE_90: 542 - if (flip & EXYNOS_DRM_FLIP_VERTICAL) 543 - cfg |= GSC_IN_ROT_90_XFLIP; 544 - else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) 545 - cfg |= GSC_IN_ROT_90_YFLIP; 546 - else 547 - cfg |= GSC_IN_ROT_90; 571 + case DRM_MODE_ROTATE_90: 572 + cfg |= GSC_IN_ROT_90; 573 + if (rotation & DRM_MODE_REFLECT_Y) 574 + cfg |= GSC_IN_ROT_XFLIP; 575 + if (rotation & 
DRM_MODE_REFLECT_X) 576 + cfg |= GSC_IN_ROT_YFLIP; 548 577 break; 549 - case EXYNOS_DRM_DEGREE_180: 578 + case DRM_MODE_ROTATE_180: 550 579 cfg |= GSC_IN_ROT_180; 551 - if (flip & EXYNOS_DRM_FLIP_VERTICAL) 580 + if (rotation & DRM_MODE_REFLECT_Y) 552 581 cfg &= ~GSC_IN_ROT_XFLIP; 553 - if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) 582 + if (rotation & DRM_MODE_REFLECT_X) 554 583 cfg &= ~GSC_IN_ROT_YFLIP; 555 584 break; 556 - case EXYNOS_DRM_DEGREE_270: 585 + case DRM_MODE_ROTATE_270: 557 586 cfg |= GSC_IN_ROT_270; 558 - if (flip & EXYNOS_DRM_FLIP_VERTICAL) 587 + if (rotation & DRM_MODE_REFLECT_Y) 559 588 cfg &= ~GSC_IN_ROT_XFLIP; 560 - if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) 589 + if (rotation & DRM_MODE_REFLECT_X) 561 590 cfg &= ~GSC_IN_ROT_YFLIP; 562 591 break; 563 - default: 564 - dev_err(ippdrv->dev, "invalid degree value %d.\n", degree); 565 - return -EINVAL; 566 592 } 567 593 568 594 gsc_write(cfg, GSC_IN_CON); 569 595 570 596 ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0; 571 - *swap = ctx->rotation; 572 - 573 - return 0; 574 597 } 575 598 576 - static int gsc_src_set_size(struct device *dev, int swap, 577 - struct drm_exynos_pos *pos, struct drm_exynos_sz *sz) 599 + static void gsc_src_set_size(struct gsc_context *ctx, 600 + struct exynos_drm_ipp_buffer *buf) 578 601 { 579 - struct gsc_context *ctx = get_gsc_context(dev); 580 - struct drm_exynos_pos img_pos = *pos; 581 602 struct gsc_scaler *sc = &ctx->sc; 582 603 u32 cfg; 583 604 584 - DRM_DEBUG_KMS("swap[%d]x[%d]y[%d]w[%d]h[%d]\n", 585 - swap, pos->x, pos->y, pos->w, pos->h); 586 - 587 - if (swap) { 588 - img_pos.w = pos->h; 589 - img_pos.h = pos->w; 590 - } 591 - 592 605 /* pixel offset */ 593 - cfg = (GSC_SRCIMG_OFFSET_X(img_pos.x) | 594 - GSC_SRCIMG_OFFSET_Y(img_pos.y)); 606 + cfg = (GSC_SRCIMG_OFFSET_X(buf->rect.x) | 607 + GSC_SRCIMG_OFFSET_Y(buf->rect.y)); 595 608 gsc_write(cfg, GSC_SRCIMG_OFFSET); 596 609 597 610 /* cropped size */ 598 - cfg = (GSC_CROPPED_WIDTH(img_pos.w) | 599 - 
GSC_CROPPED_HEIGHT(img_pos.h)); 611 + cfg = (GSC_CROPPED_WIDTH(buf->rect.w) | 612 + GSC_CROPPED_HEIGHT(buf->rect.h)); 600 613 gsc_write(cfg, GSC_CROPPED_SIZE); 601 - 602 - DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", sz->hsize, sz->vsize); 603 614 604 615 /* original size */ 605 616 cfg = gsc_read(GSC_SRCIMG_SIZE); 606 617 cfg &= ~(GSC_SRCIMG_HEIGHT_MASK | 607 618 GSC_SRCIMG_WIDTH_MASK); 608 619 609 - cfg |= (GSC_SRCIMG_WIDTH(sz->hsize) | 610 - GSC_SRCIMG_HEIGHT(sz->vsize)); 620 + cfg |= (GSC_SRCIMG_WIDTH(buf->buf.width) | 621 + GSC_SRCIMG_HEIGHT(buf->buf.height)); 611 622 612 623 gsc_write(cfg, GSC_SRCIMG_SIZE); 613 624 614 625 cfg = gsc_read(GSC_IN_CON); 615 626 cfg &= ~GSC_IN_RGB_TYPE_MASK; 616 627 617 - DRM_DEBUG_KMS("width[%d]range[%d]\n", pos->w, sc->range); 618 - 619 - if (pos->w >= GSC_WIDTH_ITU_709) 628 + if (buf->rect.w >= GSC_WIDTH_ITU_709) 620 629 if (sc->range) 621 630 cfg |= GSC_IN_RGB_HD_WIDE; 622 631 else ··· 597 668 cfg |= GSC_IN_RGB_SD_NARROW; 598 669 599 670 gsc_write(cfg, GSC_IN_CON); 600 - 601 - return 0; 602 671 } 603 672 604 - static int gsc_src_set_buf_seq(struct gsc_context *ctx, u32 buf_id, 605 - enum drm_exynos_ipp_buf_type buf_type) 673 + static void gsc_src_set_buf_seq(struct gsc_context *ctx, u32 buf_id, 674 + bool enqueue) 606 675 { 607 - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 608 - bool masked; 676 + bool masked = !enqueue; 609 677 u32 cfg; 610 678 u32 mask = 0x00000001 << buf_id; 611 679 612 - DRM_DEBUG_KMS("buf_id[%d]buf_type[%d]\n", buf_id, buf_type); 613 - 614 680 /* mask register set */ 615 681 cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK); 616 - 617 - switch (buf_type) { 618 - case IPP_BUF_ENQUEUE: 619 - masked = false; 620 - break; 621 - case IPP_BUF_DEQUEUE: 622 - masked = true; 623 - break; 624 - default: 625 - dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n"); 626 - return -EINVAL; 627 - } 628 682 629 683 /* sequence id */ 630 684 cfg &= ~mask; ··· 615 703 gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK); 616 704 gsc_write(cfg, 
GSC_IN_BASE_ADDR_CB_MASK); 617 705 gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK); 618 - 619 - return 0; 620 706 } 621 707 622 - static int gsc_src_set_addr(struct device *dev, 623 - struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id, 624 - enum drm_exynos_ipp_buf_type buf_type) 708 + static void gsc_src_set_addr(struct gsc_context *ctx, u32 buf_id, 709 + struct exynos_drm_ipp_buffer *buf) 625 710 { 626 - struct gsc_context *ctx = get_gsc_context(dev); 627 - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 628 - struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node; 629 - struct drm_exynos_ipp_property *property; 630 - 631 - if (!c_node) { 632 - DRM_ERROR("failed to get c_node.\n"); 633 - return -EFAULT; 634 - } 635 - 636 - property = &c_node->property; 637 - 638 - DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]buf_type[%d]\n", 639 - property->prop_id, buf_id, buf_type); 640 - 641 - if (buf_id > GSC_MAX_SRC) { 642 - dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id); 643 - return -EINVAL; 644 - } 645 - 646 711 /* address register set */ 647 - switch (buf_type) { 648 - case IPP_BUF_ENQUEUE: 649 - gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y], 650 - GSC_IN_BASE_ADDR_Y(buf_id)); 651 - gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB], 652 - GSC_IN_BASE_ADDR_CB(buf_id)); 653 - gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR], 654 - GSC_IN_BASE_ADDR_CR(buf_id)); 655 - break; 656 - case IPP_BUF_DEQUEUE: 657 - gsc_write(0x0, GSC_IN_BASE_ADDR_Y(buf_id)); 658 - gsc_write(0x0, GSC_IN_BASE_ADDR_CB(buf_id)); 659 - gsc_write(0x0, GSC_IN_BASE_ADDR_CR(buf_id)); 660 - break; 661 - default: 662 - /* bypass */ 663 - break; 664 - } 712 + gsc_write(buf->dma_addr[0], GSC_IN_BASE_ADDR_Y(buf_id)); 713 + gsc_write(buf->dma_addr[1], GSC_IN_BASE_ADDR_CB(buf_id)); 714 + gsc_write(buf->dma_addr[2], GSC_IN_BASE_ADDR_CR(buf_id)); 665 715 666 - return gsc_src_set_buf_seq(ctx, buf_id, buf_type); 716 + gsc_src_set_buf_seq(ctx, buf_id, true); 667 717 } 668 718 669 - static struct exynos_drm_ipp_ops gsc_src_ops = 
{ 670 - .set_fmt = gsc_src_set_fmt, 671 - .set_transf = gsc_src_set_transf, 672 - .set_size = gsc_src_set_size, 673 - .set_addr = gsc_src_set_addr, 674 - }; 675 - 676 - static int gsc_dst_set_fmt(struct device *dev, u32 fmt) 719 + static void gsc_dst_set_fmt(struct gsc_context *ctx, u32 fmt) 677 720 { 678 - struct gsc_context *ctx = get_gsc_context(dev); 679 - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 680 721 u32 cfg; 681 722 682 723 DRM_DEBUG_KMS("fmt[0x%x]\n", fmt); ··· 644 779 case DRM_FORMAT_RGB565: 645 780 cfg |= GSC_OUT_RGB565; 646 781 break; 782 + case DRM_FORMAT_ARGB8888: 647 783 case DRM_FORMAT_XRGB8888: 648 - cfg |= GSC_OUT_XRGB8888; 784 + cfg |= (GSC_OUT_XRGB8888 | GSC_OUT_GLOBAL_ALPHA(0xff)); 649 785 break; 650 786 case DRM_FORMAT_BGRX8888: 651 787 cfg |= (GSC_OUT_XRGB8888 | GSC_OUT_RB_SWAP); ··· 685 819 cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | 686 820 GSC_OUT_YUV420_2P); 687 821 break; 688 - default: 689 - dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt); 690 - return -EINVAL; 691 822 } 692 823 693 824 gsc_write(cfg, GSC_OUT_CON); 694 - 695 - return 0; 696 - } 697 - 698 - static int gsc_dst_set_transf(struct device *dev, 699 - enum drm_exynos_degree degree, 700 - enum drm_exynos_flip flip, bool *swap) 701 - { 702 - struct gsc_context *ctx = get_gsc_context(dev); 703 - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 704 - u32 cfg; 705 - 706 - DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip); 707 - 708 - cfg = gsc_read(GSC_IN_CON); 709 - cfg &= ~GSC_IN_ROT_MASK; 710 - 711 - switch (degree) { 712 - case EXYNOS_DRM_DEGREE_0: 713 - if (flip & EXYNOS_DRM_FLIP_VERTICAL) 714 - cfg |= GSC_IN_ROT_XFLIP; 715 - if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) 716 - cfg |= GSC_IN_ROT_YFLIP; 717 - break; 718 - case EXYNOS_DRM_DEGREE_90: 719 - if (flip & EXYNOS_DRM_FLIP_VERTICAL) 720 - cfg |= GSC_IN_ROT_90_XFLIP; 721 - else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) 722 - cfg |= GSC_IN_ROT_90_YFLIP; 723 - else 724 - cfg |= GSC_IN_ROT_90; 725 - break; 726 - 
case EXYNOS_DRM_DEGREE_180: 727 - cfg |= GSC_IN_ROT_180; 728 - if (flip & EXYNOS_DRM_FLIP_VERTICAL) 729 - cfg &= ~GSC_IN_ROT_XFLIP; 730 - if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) 731 - cfg &= ~GSC_IN_ROT_YFLIP; 732 - break; 733 - case EXYNOS_DRM_DEGREE_270: 734 - cfg |= GSC_IN_ROT_270; 735 - if (flip & EXYNOS_DRM_FLIP_VERTICAL) 736 - cfg &= ~GSC_IN_ROT_XFLIP; 737 - if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) 738 - cfg &= ~GSC_IN_ROT_YFLIP; 739 - break; 740 - default: 741 - dev_err(ippdrv->dev, "invalid degree value %d.\n", degree); 742 - return -EINVAL; 743 - } 744 - 745 - gsc_write(cfg, GSC_IN_CON); 746 - 747 - ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0; 748 - *swap = ctx->rotation; 749 - 750 - return 0; 751 825 } 752 826 753 827 static int gsc_get_ratio_shift(u32 src, u32 dst, u32 *ratio) ··· 725 919 } 726 920 727 921 static int gsc_set_prescaler(struct gsc_context *ctx, struct gsc_scaler *sc, 728 - struct drm_exynos_pos *src, struct drm_exynos_pos *dst) 922 + struct drm_exynos_ipp_task_rect *src, 923 + struct drm_exynos_ipp_task_rect *dst) 729 924 { 730 - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 731 925 u32 cfg; 732 926 u32 src_w, src_h, dst_w, dst_h; 733 927 int ret = 0; ··· 745 939 746 940 ret = gsc_get_ratio_shift(src_w, dst_w, &sc->pre_hratio); 747 941 if (ret) { 748 - dev_err(ippdrv->dev, "failed to get ratio horizontal.\n"); 942 + dev_err(ctx->dev, "failed to get ratio horizontal.\n"); 749 943 return ret; 750 944 } 751 945 752 946 ret = gsc_get_ratio_shift(src_h, dst_h, &sc->pre_vratio); 753 947 if (ret) { 754 - dev_err(ippdrv->dev, "failed to get ratio vertical.\n"); 948 + dev_err(ctx->dev, "failed to get ratio vertical.\n"); 755 949 return ret; 756 950 } 757 951 ··· 845 1039 gsc_write(cfg, GSC_MAIN_V_RATIO); 846 1040 } 847 1041 848 - static int gsc_dst_set_size(struct device *dev, int swap, 849 - struct drm_exynos_pos *pos, struct drm_exynos_sz *sz) 1042 + static void gsc_dst_set_size(struct gsc_context *ctx, 1043 + struct exynos_drm_ipp_buffer *buf) 
850 1044 { 851 - struct gsc_context *ctx = get_gsc_context(dev); 852 - struct drm_exynos_pos img_pos = *pos; 853 1045 struct gsc_scaler *sc = &ctx->sc; 854 1046 u32 cfg; 855 1047 856 - DRM_DEBUG_KMS("swap[%d]x[%d]y[%d]w[%d]h[%d]\n", 857 - swap, pos->x, pos->y, pos->w, pos->h); 858 - 859 - if (swap) { 860 - img_pos.w = pos->h; 861 - img_pos.h = pos->w; 862 - } 863 - 864 1048 /* pixel offset */ 865 - cfg = (GSC_DSTIMG_OFFSET_X(pos->x) | 866 - GSC_DSTIMG_OFFSET_Y(pos->y)); 1049 + cfg = (GSC_DSTIMG_OFFSET_X(buf->rect.x) | 1050 + GSC_DSTIMG_OFFSET_Y(buf->rect.y)); 867 1051 gsc_write(cfg, GSC_DSTIMG_OFFSET); 868 1052 869 1053 /* scaled size */ 870 - cfg = (GSC_SCALED_WIDTH(img_pos.w) | GSC_SCALED_HEIGHT(img_pos.h)); 1054 + if (ctx->rotation) 1055 + cfg = (GSC_SCALED_WIDTH(buf->rect.h) | 1056 + GSC_SCALED_HEIGHT(buf->rect.w)); 1057 + else 1058 + cfg = (GSC_SCALED_WIDTH(buf->rect.w) | 1059 + GSC_SCALED_HEIGHT(buf->rect.h)); 871 1060 gsc_write(cfg, GSC_SCALED_SIZE); 872 - 873 - DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", sz->hsize, sz->vsize); 874 1061 875 1062 /* original size */ 876 1063 cfg = gsc_read(GSC_DSTIMG_SIZE); 877 - cfg &= ~(GSC_DSTIMG_HEIGHT_MASK | 878 - GSC_DSTIMG_WIDTH_MASK); 879 - cfg |= (GSC_DSTIMG_WIDTH(sz->hsize) | 880 - GSC_DSTIMG_HEIGHT(sz->vsize)); 1064 + cfg &= ~(GSC_DSTIMG_HEIGHT_MASK | GSC_DSTIMG_WIDTH_MASK); 1065 + cfg |= GSC_DSTIMG_WIDTH(buf->buf.width) | 1066 + GSC_DSTIMG_HEIGHT(buf->buf.height); 881 1067 gsc_write(cfg, GSC_DSTIMG_SIZE); 882 1068 883 1069 cfg = gsc_read(GSC_OUT_CON); 884 1070 cfg &= ~GSC_OUT_RGB_TYPE_MASK; 885 1071 886 - DRM_DEBUG_KMS("width[%d]range[%d]\n", pos->w, sc->range); 887 - 888 - if (pos->w >= GSC_WIDTH_ITU_709) 1072 + if (buf->rect.w >= GSC_WIDTH_ITU_709) 889 1073 if (sc->range) 890 1074 cfg |= GSC_OUT_RGB_HD_WIDE; 891 1075 else ··· 887 1091 cfg |= GSC_OUT_RGB_SD_NARROW; 888 1092 889 1093 gsc_write(cfg, GSC_OUT_CON); 890 - 891 - return 0; 892 1094 } 893 1095 894 1096 static int gsc_dst_get_buf_seq(struct gsc_context *ctx) 
··· 905 1111 return buf_num; 906 1112 } 907 1113 908 - static int gsc_dst_set_buf_seq(struct gsc_context *ctx, u32 buf_id, 909 - enum drm_exynos_ipp_buf_type buf_type) 1114 + static void gsc_dst_set_buf_seq(struct gsc_context *ctx, u32 buf_id, 1115 + bool enqueue) 910 1116 { 911 - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 912 - bool masked; 1117 + bool masked = !enqueue; 913 1118 u32 cfg; 914 1119 u32 mask = 0x00000001 << buf_id; 915 - int ret = 0; 916 - 917 - DRM_DEBUG_KMS("buf_id[%d]buf_type[%d]\n", buf_id, buf_type); 918 - 919 - mutex_lock(&ctx->lock); 920 1120 921 1121 /* mask register set */ 922 1122 cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK); 923 - 924 - switch (buf_type) { 925 - case IPP_BUF_ENQUEUE: 926 - masked = false; 927 - break; 928 - case IPP_BUF_DEQUEUE: 929 - masked = true; 930 - break; 931 - default: 932 - dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n"); 933 - ret = -EINVAL; 934 - goto err_unlock; 935 - } 936 1123 937 1124 /* sequence id */ 938 1125 cfg &= ~mask; ··· 923 1148 gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK); 924 1149 925 1150 /* interrupt enable */ 926 - if (buf_type == IPP_BUF_ENQUEUE && 927 - gsc_dst_get_buf_seq(ctx) >= GSC_BUF_START) 1151 + if (enqueue && gsc_dst_get_buf_seq(ctx) >= GSC_BUF_START) 928 1152 gsc_handle_irq(ctx, true, false, true); 929 1153 930 1154 /* interrupt disable */ 931 - if (buf_type == IPP_BUF_DEQUEUE && 932 - gsc_dst_get_buf_seq(ctx) <= GSC_BUF_STOP) 1155 + if (!enqueue && gsc_dst_get_buf_seq(ctx) <= GSC_BUF_STOP) 933 1156 gsc_handle_irq(ctx, false, false, true); 934 - 935 - err_unlock: 936 - mutex_unlock(&ctx->lock); 937 - return ret; 938 1157 } 939 1158 940 - static int gsc_dst_set_addr(struct device *dev, 941 - struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id, 942 - enum drm_exynos_ipp_buf_type buf_type) 1159 + static void gsc_dst_set_addr(struct gsc_context *ctx, 1160 + u32 buf_id, struct exynos_drm_ipp_buffer *buf) 943 1161 { 944 - struct gsc_context *ctx = get_gsc_context(dev); 945 - struct 
exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 946 - struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node; 947 - struct drm_exynos_ipp_property *property; 948 - 949 - if (!c_node) { 950 - DRM_ERROR("failed to get c_node.\n"); 951 - return -EFAULT; 952 - } 953 - 954 - property = &c_node->property; 955 - 956 - DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]buf_type[%d]\n", 957 - property->prop_id, buf_id, buf_type); 958 - 959 - if (buf_id > GSC_MAX_DST) { 960 - dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id); 961 - return -EINVAL; 962 - } 963 - 964 1162 /* address register set */ 965 - switch (buf_type) { 966 - case IPP_BUF_ENQUEUE: 967 - gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y], 968 - GSC_OUT_BASE_ADDR_Y(buf_id)); 969 - gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB], 970 - GSC_OUT_BASE_ADDR_CB(buf_id)); 971 - gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR], 972 - GSC_OUT_BASE_ADDR_CR(buf_id)); 973 - break; 974 - case IPP_BUF_DEQUEUE: 975 - gsc_write(0x0, GSC_OUT_BASE_ADDR_Y(buf_id)); 976 - gsc_write(0x0, GSC_OUT_BASE_ADDR_CB(buf_id)); 977 - gsc_write(0x0, GSC_OUT_BASE_ADDR_CR(buf_id)); 978 - break; 979 - default: 980 - /* bypass */ 981 - break; 982 - } 1163 + gsc_write(buf->dma_addr[0], GSC_OUT_BASE_ADDR_Y(buf_id)); 1164 + gsc_write(buf->dma_addr[1], GSC_OUT_BASE_ADDR_CB(buf_id)); 1165 + gsc_write(buf->dma_addr[2], GSC_OUT_BASE_ADDR_CR(buf_id)); 983 1166 984 - return gsc_dst_set_buf_seq(ctx, buf_id, buf_type); 985 - } 986 - 987 - static struct exynos_drm_ipp_ops gsc_dst_ops = { 988 - .set_fmt = gsc_dst_set_fmt, 989 - .set_transf = gsc_dst_set_transf, 990 - .set_size = gsc_dst_set_size, 991 - .set_addr = gsc_dst_set_addr, 992 - }; 993 - 994 - static int gsc_clk_ctrl(struct gsc_context *ctx, bool enable) 995 - { 996 - DRM_DEBUG_KMS("enable[%d]\n", enable); 997 - 998 - if (enable) { 999 - clk_prepare_enable(ctx->gsc_clk); 1000 - ctx->suspended = false; 1001 - } else { 1002 - clk_disable_unprepare(ctx->gsc_clk); 1003 - ctx->suspended = true; 1004 - } 1005 - 1006 - return 
0; 1167 + gsc_dst_set_buf_seq(ctx, buf_id, true); 1007 1168 } 1008 1169 1009 1170 static int gsc_get_src_buf_index(struct gsc_context *ctx) 1010 1171 { 1011 1172 u32 cfg, curr_index, i; 1012 1173 u32 buf_id = GSC_MAX_SRC; 1013 - int ret; 1014 1174 1015 1175 DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id); 1016 1176 ··· 959 1249 } 960 1250 } 961 1251 1252 + DRM_DEBUG_KMS("cfg[0x%x]curr_index[%d]buf_id[%d]\n", cfg, 1253 + curr_index, buf_id); 1254 + 962 1255 if (buf_id == GSC_MAX_SRC) { 963 1256 DRM_ERROR("failed to get in buffer index.\n"); 964 1257 return -EINVAL; 965 1258 } 966 1259 967 - ret = gsc_src_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE); 968 - if (ret < 0) { 969 - DRM_ERROR("failed to dequeue.\n"); 970 - return ret; 971 - } 972 - 973 - DRM_DEBUG_KMS("cfg[0x%x]curr_index[%d]buf_id[%d]\n", cfg, 974 - curr_index, buf_id); 1260 + gsc_src_set_buf_seq(ctx, buf_id, false); 975 1261 976 1262 return buf_id; 977 1263 } ··· 976 1270 { 977 1271 u32 cfg, curr_index, i; 978 1272 u32 buf_id = GSC_MAX_DST; 979 - int ret; 980 1273 981 1274 DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id); 982 1275 ··· 994 1289 return -EINVAL; 995 1290 } 996 1291 997 - ret = gsc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE); 998 - if (ret < 0) { 999 - DRM_ERROR("failed to dequeue.\n"); 1000 - return ret; 1001 - } 1292 + gsc_dst_set_buf_seq(ctx, buf_id, false); 1002 1293 1003 1294 DRM_DEBUG_KMS("cfg[0x%x]curr_index[%d]buf_id[%d]\n", cfg, 1004 1295 curr_index, buf_id); ··· 1005 1304 static irqreturn_t gsc_irq_handler(int irq, void *dev_id) 1006 1305 { 1007 1306 struct gsc_context *ctx = dev_id; 1008 - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 1009 - struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node; 1010 - struct drm_exynos_ipp_event_work *event_work = 1011 - c_node->event_work; 1012 1307 u32 status; 1013 - int buf_id[EXYNOS_DRM_OPS_MAX]; 1308 + int err = 0; 1014 1309 1015 1310 DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id); 1016 1311 1017 1312 status = gsc_read(GSC_IRQ); 1018 1313 if (status & 
GSC_IRQ_STATUS_OR_IRQ) { 1019 - dev_err(ippdrv->dev, "occurred overflow at %d, status 0x%x.\n", 1314 + dev_err(ctx->dev, "occurred overflow at %d, status 0x%x.\n", 1020 1315 ctx->id, status); 1021 - return IRQ_NONE; 1316 + err = -EINVAL; 1022 1317 } 1023 1318 1024 1319 if (status & GSC_IRQ_STATUS_OR_FRM_DONE) { 1025 - dev_dbg(ippdrv->dev, "occurred frame done at %d, status 0x%x.\n", 1320 + int src_buf_id, dst_buf_id; 1321 + 1322 + dev_dbg(ctx->dev, "occurred frame done at %d, status 0x%x.\n", 1026 1323 ctx->id, status); 1027 1324 1028 - buf_id[EXYNOS_DRM_OPS_SRC] = gsc_get_src_buf_index(ctx); 1029 - if (buf_id[EXYNOS_DRM_OPS_SRC] < 0) 1030 - return IRQ_HANDLED; 1325 + src_buf_id = gsc_get_src_buf_index(ctx); 1326 + dst_buf_id = gsc_get_dst_buf_index(ctx); 1031 1327 1032 - buf_id[EXYNOS_DRM_OPS_DST] = gsc_get_dst_buf_index(ctx); 1033 - if (buf_id[EXYNOS_DRM_OPS_DST] < 0) 1034 - return IRQ_HANDLED; 1328 + DRM_DEBUG_KMS("buf_id_src[%d]buf_id_dst[%d]\n", src_buf_id, 1329 + dst_buf_id); 1035 1330 1036 - DRM_DEBUG_KMS("buf_id_src[%d]buf_id_dst[%d]\n", 1037 - buf_id[EXYNOS_DRM_OPS_SRC], buf_id[EXYNOS_DRM_OPS_DST]); 1331 + if (src_buf_id < 0 || dst_buf_id < 0) 1332 + err = -EINVAL; 1333 + } 1038 1334 1039 - event_work->ippdrv = ippdrv; 1040 - event_work->buf_id[EXYNOS_DRM_OPS_SRC] = 1041 - buf_id[EXYNOS_DRM_OPS_SRC]; 1042 - event_work->buf_id[EXYNOS_DRM_OPS_DST] = 1043 - buf_id[EXYNOS_DRM_OPS_DST]; 1044 - queue_work(ippdrv->event_workq, &event_work->work); 1335 + if (ctx->task) { 1336 + struct exynos_drm_ipp_task *task = ctx->task; 1337 + 1338 + ctx->task = NULL; 1339 + pm_runtime_mark_last_busy(ctx->dev); 1340 + pm_runtime_put_autosuspend(ctx->dev); 1341 + exynos_drm_ipp_task_done(task, err); 1045 1342 } 1046 1343 1047 1344 return IRQ_HANDLED; 1048 1345 } 1049 1346 1050 - static int gsc_init_prop_list(struct exynos_drm_ippdrv *ippdrv) 1347 + static int gsc_reset(struct gsc_context *ctx) 1051 1348 { 1052 - struct drm_exynos_ipp_prop_list *prop_list = &ippdrv->prop_list; 
1053 - 1054 - prop_list->version = 1; 1055 - prop_list->writeback = 1; 1056 - prop_list->refresh_min = GSC_REFRESH_MIN; 1057 - prop_list->refresh_max = GSC_REFRESH_MAX; 1058 - prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) | 1059 - (1 << EXYNOS_DRM_FLIP_HORIZONTAL); 1060 - prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) | 1061 - (1 << EXYNOS_DRM_DEGREE_90) | 1062 - (1 << EXYNOS_DRM_DEGREE_180) | 1063 - (1 << EXYNOS_DRM_DEGREE_270); 1064 - prop_list->csc = 1; 1065 - prop_list->crop = 1; 1066 - prop_list->crop_max.hsize = GSC_CROP_MAX; 1067 - prop_list->crop_max.vsize = GSC_CROP_MAX; 1068 - prop_list->crop_min.hsize = GSC_CROP_MIN; 1069 - prop_list->crop_min.vsize = GSC_CROP_MIN; 1070 - prop_list->scale = 1; 1071 - prop_list->scale_max.hsize = GSC_SCALE_MAX; 1072 - prop_list->scale_max.vsize = GSC_SCALE_MAX; 1073 - prop_list->scale_min.hsize = GSC_SCALE_MIN; 1074 - prop_list->scale_min.vsize = GSC_SCALE_MIN; 1075 - 1076 - return 0; 1077 - } 1078 - 1079 - static inline bool gsc_check_drm_flip(enum drm_exynos_flip flip) 1080 - { 1081 - switch (flip) { 1082 - case EXYNOS_DRM_FLIP_NONE: 1083 - case EXYNOS_DRM_FLIP_VERTICAL: 1084 - case EXYNOS_DRM_FLIP_HORIZONTAL: 1085 - case EXYNOS_DRM_FLIP_BOTH: 1086 - return true; 1087 - default: 1088 - DRM_DEBUG_KMS("invalid flip\n"); 1089 - return false; 1090 - } 1091 - } 1092 - 1093 - static int gsc_ippdrv_check_property(struct device *dev, 1094 - struct drm_exynos_ipp_property *property) 1095 - { 1096 - struct gsc_context *ctx = get_gsc_context(dev); 1097 - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 1098 - struct drm_exynos_ipp_prop_list *pp = &ippdrv->prop_list; 1099 - struct drm_exynos_ipp_config *config; 1100 - struct drm_exynos_pos *pos; 1101 - struct drm_exynos_sz *sz; 1102 - bool swap; 1103 - int i; 1104 - 1105 - for_each_ipp_ops(i) { 1106 - if ((i == EXYNOS_DRM_OPS_SRC) && 1107 - (property->cmd == IPP_CMD_WB)) 1108 - continue; 1109 - 1110 - config = &property->config[i]; 1111 - pos = &config->pos; 1112 - sz = 
&config->sz; 1113 - 1114 - /* check for flip */ 1115 - if (!gsc_check_drm_flip(config->flip)) { 1116 - DRM_ERROR("invalid flip.\n"); 1117 - goto err_property; 1118 - } 1119 - 1120 - /* check for degree */ 1121 - switch (config->degree) { 1122 - case EXYNOS_DRM_DEGREE_90: 1123 - case EXYNOS_DRM_DEGREE_270: 1124 - swap = true; 1125 - break; 1126 - case EXYNOS_DRM_DEGREE_0: 1127 - case EXYNOS_DRM_DEGREE_180: 1128 - swap = false; 1129 - break; 1130 - default: 1131 - DRM_ERROR("invalid degree.\n"); 1132 - goto err_property; 1133 - } 1134 - 1135 - /* check for buffer bound */ 1136 - if ((pos->x + pos->w > sz->hsize) || 1137 - (pos->y + pos->h > sz->vsize)) { 1138 - DRM_ERROR("out of buf bound.\n"); 1139 - goto err_property; 1140 - } 1141 - 1142 - /* check for crop */ 1143 - if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) { 1144 - if (swap) { 1145 - if ((pos->h < pp->crop_min.hsize) || 1146 - (sz->vsize > pp->crop_max.hsize) || 1147 - (pos->w < pp->crop_min.vsize) || 1148 - (sz->hsize > pp->crop_max.vsize)) { 1149 - DRM_ERROR("out of crop size.\n"); 1150 - goto err_property; 1151 - } 1152 - } else { 1153 - if ((pos->w < pp->crop_min.hsize) || 1154 - (sz->hsize > pp->crop_max.hsize) || 1155 - (pos->h < pp->crop_min.vsize) || 1156 - (sz->vsize > pp->crop_max.vsize)) { 1157 - DRM_ERROR("out of crop size.\n"); 1158 - goto err_property; 1159 - } 1160 - } 1161 - } 1162 - 1163 - /* check for scale */ 1164 - if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) { 1165 - if (swap) { 1166 - if ((pos->h < pp->scale_min.hsize) || 1167 - (sz->vsize > pp->scale_max.hsize) || 1168 - (pos->w < pp->scale_min.vsize) || 1169 - (sz->hsize > pp->scale_max.vsize)) { 1170 - DRM_ERROR("out of scale size.\n"); 1171 - goto err_property; 1172 - } 1173 - } else { 1174 - if ((pos->w < pp->scale_min.hsize) || 1175 - (sz->hsize > pp->scale_max.hsize) || 1176 - (pos->h < pp->scale_min.vsize) || 1177 - (sz->vsize > pp->scale_max.vsize)) { 1178 - DRM_ERROR("out of scale size.\n"); 1179 - goto err_property; 1180 - } 
1181 - } 1182 - } 1183 - } 1184 - 1185 - return 0; 1186 - 1187 - err_property: 1188 - for_each_ipp_ops(i) { 1189 - if ((i == EXYNOS_DRM_OPS_SRC) && 1190 - (property->cmd == IPP_CMD_WB)) 1191 - continue; 1192 - 1193 - config = &property->config[i]; 1194 - pos = &config->pos; 1195 - sz = &config->sz; 1196 - 1197 - DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n", 1198 - i ? "dst" : "src", config->flip, config->degree, 1199 - pos->x, pos->y, pos->w, pos->h, 1200 - sz->hsize, sz->vsize); 1201 - } 1202 - 1203 - return -EINVAL; 1204 - } 1205 - 1206 - 1207 - static int gsc_ippdrv_reset(struct device *dev) 1208 - { 1209 - struct gsc_context *ctx = get_gsc_context(dev); 1210 1349 struct gsc_scaler *sc = &ctx->sc; 1211 1350 int ret; 1212 1351 1213 1352 /* reset h/w block */ 1214 1353 ret = gsc_sw_reset(ctx); 1215 1354 if (ret < 0) { 1216 - dev_err(dev, "failed to reset hardware.\n"); 1355 + dev_err(ctx->dev, "failed to reset hardware.\n"); 1217 1356 return ret; 1218 1357 } 1219 1358 ··· 1064 1523 return 0; 1065 1524 } 1066 1525 1067 - static int gsc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd) 1526 + static void gsc_start(struct gsc_context *ctx) 1068 1527 { 1069 - struct gsc_context *ctx = get_gsc_context(dev); 1070 - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 1071 - struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node; 1072 - struct drm_exynos_ipp_property *property; 1073 - struct drm_exynos_ipp_config *config; 1074 - struct drm_exynos_pos img_pos[EXYNOS_DRM_OPS_MAX]; 1075 - struct drm_exynos_ipp_set_wb set_wb; 1076 1528 u32 cfg; 1077 - int ret, i; 1078 - 1079 - DRM_DEBUG_KMS("cmd[%d]\n", cmd); 1080 - 1081 - if (!c_node) { 1082 - DRM_ERROR("failed to get c_node.\n"); 1083 - return -EINVAL; 1084 - } 1085 - 1086 - property = &c_node->property; 1087 1529 1088 1530 gsc_handle_irq(ctx, true, false, true); 1089 1531 1090 - for_each_ipp_ops(i) { 1091 - config = &property->config[i]; 1092 - img_pos[i] = config->pos; 1093 - } 1532 + /* enable one shot 
*/ 1533 + cfg = gsc_read(GSC_ENABLE); 1534 + cfg &= ~(GSC_ENABLE_ON_CLEAR_MASK | 1535 + GSC_ENABLE_CLK_GATE_MODE_MASK); 1536 + cfg |= GSC_ENABLE_ON_CLEAR_ONESHOT; 1537 + gsc_write(cfg, GSC_ENABLE); 1094 1538 1095 - switch (cmd) { 1096 - case IPP_CMD_M2M: 1097 - /* enable one shot */ 1098 - cfg = gsc_read(GSC_ENABLE); 1099 - cfg &= ~(GSC_ENABLE_ON_CLEAR_MASK | 1100 - GSC_ENABLE_CLK_GATE_MODE_MASK); 1101 - cfg |= GSC_ENABLE_ON_CLEAR_ONESHOT; 1102 - gsc_write(cfg, GSC_ENABLE); 1539 + /* src dma memory */ 1540 + cfg = gsc_read(GSC_IN_CON); 1541 + cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK); 1542 + cfg |= GSC_IN_PATH_MEMORY; 1543 + gsc_write(cfg, GSC_IN_CON); 1103 1544 1104 - /* src dma memory */ 1105 - cfg = gsc_read(GSC_IN_CON); 1106 - cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK); 1107 - cfg |= GSC_IN_PATH_MEMORY; 1108 - gsc_write(cfg, GSC_IN_CON); 1109 - 1110 - /* dst dma memory */ 1111 - cfg = gsc_read(GSC_OUT_CON); 1112 - cfg |= GSC_OUT_PATH_MEMORY; 1113 - gsc_write(cfg, GSC_OUT_CON); 1114 - break; 1115 - case IPP_CMD_WB: 1116 - set_wb.enable = 1; 1117 - set_wb.refresh = property->refresh_rate; 1118 - gsc_set_gscblk_fimd_wb(ctx, set_wb.enable); 1119 - exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb); 1120 - 1121 - /* src local path */ 1122 - cfg = gsc_read(GSC_IN_CON); 1123 - cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK); 1124 - cfg |= (GSC_IN_PATH_LOCAL | GSC_IN_LOCAL_FIMD_WB); 1125 - gsc_write(cfg, GSC_IN_CON); 1126 - 1127 - /* dst dma memory */ 1128 - cfg = gsc_read(GSC_OUT_CON); 1129 - cfg |= GSC_OUT_PATH_MEMORY; 1130 - gsc_write(cfg, GSC_OUT_CON); 1131 - break; 1132 - case IPP_CMD_OUTPUT: 1133 - /* src dma memory */ 1134 - cfg = gsc_read(GSC_IN_CON); 1135 - cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK); 1136 - cfg |= GSC_IN_PATH_MEMORY; 1137 - gsc_write(cfg, GSC_IN_CON); 1138 - 1139 - /* dst local path */ 1140 - cfg = gsc_read(GSC_OUT_CON); 1141 - cfg |= GSC_OUT_PATH_MEMORY; 1142 - gsc_write(cfg, GSC_OUT_CON); 1143 - 
break; 1144 - default: 1145 - ret = -EINVAL; 1146 - dev_err(dev, "invalid operations.\n"); 1147 - return ret; 1148 - } 1149 - 1150 - ret = gsc_set_prescaler(ctx, &ctx->sc, 1151 - &img_pos[EXYNOS_DRM_OPS_SRC], 1152 - &img_pos[EXYNOS_DRM_OPS_DST]); 1153 - if (ret) { 1154 - dev_err(dev, "failed to set prescaler.\n"); 1155 - return ret; 1156 - } 1545 + /* dst dma memory */ 1546 + cfg = gsc_read(GSC_OUT_CON); 1547 + cfg |= GSC_OUT_PATH_MEMORY; 1548 + gsc_write(cfg, GSC_OUT_CON); 1157 1549 1158 1550 gsc_set_scaler(ctx, &ctx->sc); 1159 1551 1160 1552 cfg = gsc_read(GSC_ENABLE); 1161 1553 cfg |= GSC_ENABLE_ON; 1162 1554 gsc_write(cfg, GSC_ENABLE); 1555 + } 1556 + 1557 + static int gsc_commit(struct exynos_drm_ipp *ipp, 1558 + struct exynos_drm_ipp_task *task) 1559 + { 1560 + struct gsc_context *ctx = container_of(ipp, struct gsc_context, ipp); 1561 + int ret; 1562 + 1563 + pm_runtime_get_sync(ctx->dev); 1564 + ctx->task = task; 1565 + 1566 + ret = gsc_reset(ctx); 1567 + if (ret) { 1568 + pm_runtime_put_autosuspend(ctx->dev); 1569 + ctx->task = NULL; 1570 + return ret; 1571 + } 1572 + 1573 + gsc_src_set_fmt(ctx, task->src.buf.fourcc); 1574 + gsc_src_set_transf(ctx, task->transform.rotation); 1575 + gsc_src_set_size(ctx, &task->src); 1576 + gsc_src_set_addr(ctx, 0, &task->src); 1577 + gsc_dst_set_fmt(ctx, task->dst.buf.fourcc); 1578 + gsc_dst_set_size(ctx, &task->dst); 1579 + gsc_dst_set_addr(ctx, 0, &task->dst); 1580 + gsc_set_prescaler(ctx, &ctx->sc, &task->src.rect, &task->dst.rect); 1581 + gsc_start(ctx); 1163 1582 1164 1583 return 0; 1165 1584 } 1166 1585 1167 - static void gsc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd) 1586 + static void gsc_abort(struct exynos_drm_ipp *ipp, 1587 + struct exynos_drm_ipp_task *task) 1168 1588 { 1169 - struct gsc_context *ctx = get_gsc_context(dev); 1170 - struct drm_exynos_ipp_set_wb set_wb = {0, 0}; 1171 - u32 cfg; 1589 + struct gsc_context *ctx = 1590 + container_of(ipp, struct gsc_context, ipp); 1172 1591 1173 - 
DRM_DEBUG_KMS("cmd[%d]\n", cmd); 1592 + gsc_reset(ctx); 1593 + if (ctx->task) { 1594 + struct exynos_drm_ipp_task *task = ctx->task; 1174 1595 1175 - switch (cmd) { 1176 - case IPP_CMD_M2M: 1177 - /* bypass */ 1178 - break; 1179 - case IPP_CMD_WB: 1180 - gsc_set_gscblk_fimd_wb(ctx, set_wb.enable); 1181 - exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb); 1182 - break; 1183 - case IPP_CMD_OUTPUT: 1184 - default: 1185 - dev_err(dev, "invalid operations.\n"); 1186 - break; 1596 + ctx->task = NULL; 1597 + pm_runtime_mark_last_busy(ctx->dev); 1598 + pm_runtime_put_autosuspend(ctx->dev); 1599 + exynos_drm_ipp_task_done(task, -EIO); 1187 1600 } 1188 - 1189 - gsc_handle_irq(ctx, false, false, true); 1190 - 1191 - /* reset sequence */ 1192 - gsc_write(0xff, GSC_OUT_BASE_ADDR_Y_MASK); 1193 - gsc_write(0xff, GSC_OUT_BASE_ADDR_CB_MASK); 1194 - gsc_write(0xff, GSC_OUT_BASE_ADDR_CR_MASK); 1195 - 1196 - cfg = gsc_read(GSC_ENABLE); 1197 - cfg &= ~GSC_ENABLE_ON; 1198 - gsc_write(cfg, GSC_ENABLE); 1199 1601 } 1602 + 1603 + static struct exynos_drm_ipp_funcs ipp_funcs = { 1604 + .commit = gsc_commit, 1605 + .abort = gsc_abort, 1606 + }; 1607 + 1608 + static int gsc_bind(struct device *dev, struct device *master, void *data) 1609 + { 1610 + struct gsc_context *ctx = dev_get_drvdata(dev); 1611 + struct drm_device *drm_dev = data; 1612 + struct exynos_drm_ipp *ipp = &ctx->ipp; 1613 + 1614 + ctx->drm_dev = drm_dev; 1615 + drm_iommu_attach_device(drm_dev, dev); 1616 + 1617 + exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs, 1618 + DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE | 1619 + DRM_EXYNOS_IPP_CAP_SCALE | DRM_EXYNOS_IPP_CAP_CONVERT, 1620 + ctx->formats, ctx->num_formats, "gsc"); 1621 + 1622 + dev_info(dev, "The exynos gscaler has been probed successfully\n"); 1623 + 1624 + return 0; 1625 + } 1626 + 1627 + static void gsc_unbind(struct device *dev, struct device *master, 1628 + void *data) 1629 + { 1630 + struct gsc_context *ctx = dev_get_drvdata(dev); 1631 + 
struct drm_device *drm_dev = data; 1632 + struct exynos_drm_ipp *ipp = &ctx->ipp; 1633 + 1634 + exynos_drm_ipp_unregister(drm_dev, ipp); 1635 + drm_iommu_detach_device(drm_dev, dev); 1636 + } 1637 + 1638 + static const struct component_ops gsc_component_ops = { 1639 + .bind = gsc_bind, 1640 + .unbind = gsc_unbind, 1641 + }; 1642 + 1643 + static const unsigned int gsc_formats[] = { 1644 + DRM_FORMAT_ARGB8888, 1645 + DRM_FORMAT_XRGB8888, DRM_FORMAT_RGB565, DRM_FORMAT_BGRX8888, 1646 + DRM_FORMAT_NV12, DRM_FORMAT_NV16, DRM_FORMAT_NV21, DRM_FORMAT_NV61, 1647 + DRM_FORMAT_UYVY, DRM_FORMAT_VYUY, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, 1648 + DRM_FORMAT_YUV420, DRM_FORMAT_YVU420, DRM_FORMAT_YUV422, 1649 + }; 1200 1650 1201 1651 static int gsc_probe(struct platform_device *pdev) 1202 1652 { 1203 1653 struct device *dev = &pdev->dev; 1654 + struct gsc_driverdata *driver_data; 1655 + struct exynos_drm_ipp_formats *formats; 1204 1656 struct gsc_context *ctx; 1205 1657 struct resource *res; 1206 - struct exynos_drm_ippdrv *ippdrv; 1207 - int ret; 1658 + int ret, i; 1208 1659 1209 1660 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); 1210 1661 if (!ctx) 1211 1662 return -ENOMEM; 1212 1663 1213 - if (dev->of_node) { 1214 - ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node, 1215 - "samsung,sysreg"); 1216 - if (IS_ERR(ctx->sysreg)) { 1217 - dev_warn(dev, "failed to get system register.\n"); 1218 - ctx->sysreg = NULL; 1219 - } 1664 + formats = devm_kzalloc(dev, sizeof(*formats) * 1665 + (ARRAY_SIZE(gsc_formats)), GFP_KERNEL); 1666 + if (!formats) 1667 + return -ENOMEM; 1668 + 1669 + driver_data = (struct gsc_driverdata *)of_device_get_match_data(dev); 1670 + ctx->dev = dev; 1671 + ctx->num_clocks = driver_data->num_clocks; 1672 + ctx->clk_names = driver_data->clk_names; 1673 + 1674 + for (i = 0; i < ARRAY_SIZE(gsc_formats); i++) { 1675 + formats[i].fourcc = gsc_formats[i]; 1676 + formats[i].type = DRM_EXYNOS_IPP_FORMAT_SOURCE | 1677 + DRM_EXYNOS_IPP_FORMAT_DESTINATION; 1678 + 
formats[i].limits = driver_data->limits; 1679 + formats[i].num_limits = driver_data->num_limits; 1220 1680 } 1681 + ctx->formats = formats; 1682 + ctx->num_formats = ARRAY_SIZE(gsc_formats); 1221 1683 1222 1684 /* clock control */ 1223 - ctx->gsc_clk = devm_clk_get(dev, "gscl"); 1224 - if (IS_ERR(ctx->gsc_clk)) { 1225 - dev_err(dev, "failed to get gsc clock.\n"); 1226 - return PTR_ERR(ctx->gsc_clk); 1685 + for (i = 0; i < ctx->num_clocks; i++) { 1686 + ctx->clocks[i] = devm_clk_get(dev, ctx->clk_names[i]); 1687 + if (IS_ERR(ctx->clocks[i])) { 1688 + dev_err(dev, "failed to get clock: %s\n", 1689 + ctx->clk_names[i]); 1690 + return PTR_ERR(ctx->clocks[i]); 1691 + } 1227 1692 } 1228 1693 1229 1694 /* resource memory */ ··· 1246 1699 } 1247 1700 1248 1701 ctx->irq = res->start; 1249 - ret = devm_request_threaded_irq(dev, ctx->irq, NULL, gsc_irq_handler, 1250 - IRQF_ONESHOT, "drm_gsc", ctx); 1702 + ret = devm_request_irq(dev, ctx->irq, gsc_irq_handler, 0, 1703 + dev_name(dev), ctx); 1251 1704 if (ret < 0) { 1252 1705 dev_err(dev, "failed to request irq.\n"); 1253 1706 return ret; ··· 1256 1709 /* context initailization */ 1257 1710 ctx->id = pdev->id; 1258 1711 1259 - ippdrv = &ctx->ippdrv; 1260 - ippdrv->dev = dev; 1261 - ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &gsc_src_ops; 1262 - ippdrv->ops[EXYNOS_DRM_OPS_DST] = &gsc_dst_ops; 1263 - ippdrv->check_property = gsc_ippdrv_check_property; 1264 - ippdrv->reset = gsc_ippdrv_reset; 1265 - ippdrv->start = gsc_ippdrv_start; 1266 - ippdrv->stop = gsc_ippdrv_stop; 1267 - ret = gsc_init_prop_list(ippdrv); 1268 - if (ret < 0) { 1269 - dev_err(dev, "failed to init property list.\n"); 1270 - return ret; 1271 - } 1272 - 1273 - DRM_DEBUG_KMS("id[%d]ippdrv[%pK]\n", ctx->id, ippdrv); 1274 - 1275 - mutex_init(&ctx->lock); 1276 1712 platform_set_drvdata(pdev, ctx); 1277 1713 1714 + pm_runtime_use_autosuspend(dev); 1715 + pm_runtime_set_autosuspend_delay(dev, GSC_AUTOSUSPEND_DELAY); 1278 1716 pm_runtime_enable(dev); 1279 1717 1280 - ret = 
exynos_drm_ippdrv_register(ippdrv); 1281 - if (ret < 0) { 1282 - dev_err(dev, "failed to register drm gsc device.\n"); 1283 - goto err_ippdrv_register; 1284 - } 1718 + ret = component_add(dev, &gsc_component_ops); 1719 + if (ret) 1720 + goto err_pm_dis; 1285 1721 1286 1722 dev_info(dev, "drm gsc registered successfully.\n"); 1287 1723 1288 1724 return 0; 1289 1725 1290 - err_ippdrv_register: 1726 + err_pm_dis: 1727 + pm_runtime_dont_use_autosuspend(dev); 1291 1728 pm_runtime_disable(dev); 1292 1729 return ret; 1293 1730 } ··· 1279 1748 static int gsc_remove(struct platform_device *pdev) 1280 1749 { 1281 1750 struct device *dev = &pdev->dev; 1282 - struct gsc_context *ctx = get_gsc_context(dev); 1283 - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 1284 1751 1285 - exynos_drm_ippdrv_unregister(ippdrv); 1286 - mutex_destroy(&ctx->lock); 1287 - 1288 - pm_runtime_set_suspended(dev); 1752 + pm_runtime_dont_use_autosuspend(dev); 1289 1753 pm_runtime_disable(dev); 1290 1754 1291 1755 return 0; ··· 1289 1763 static int __maybe_unused gsc_runtime_suspend(struct device *dev) 1290 1764 { 1291 1765 struct gsc_context *ctx = get_gsc_context(dev); 1766 + int i; 1292 1767 1293 1768 DRM_DEBUG_KMS("id[%d]\n", ctx->id); 1294 1769 1295 - return gsc_clk_ctrl(ctx, false); 1770 + for (i = ctx->num_clocks - 1; i >= 0; i--) 1771 + clk_disable_unprepare(ctx->clocks[i]); 1772 + 1773 + return 0; 1296 1774 } 1297 1775 1298 1776 static int __maybe_unused gsc_runtime_resume(struct device *dev) 1299 1777 { 1300 1778 struct gsc_context *ctx = get_gsc_context(dev); 1779 + int i, ret; 1301 1780 1302 1781 DRM_DEBUG_KMS("id[%d]\n", ctx->id); 1303 1782 1304 - return gsc_clk_ctrl(ctx, true); 1783 + for (i = 0; i < ctx->num_clocks; i++) { 1784 + ret = clk_prepare_enable(ctx->clocks[i]); 1785 + if (ret) { 1786 + while (--i > 0) 1787 + clk_disable_unprepare(ctx->clocks[i]); 1788 + return ret; 1789 + } 1790 + } 1791 + return 0; 1305 1792 } 1306 1793 1307 1794 static const struct dev_pm_ops gsc_pm_ops = { 
··· 1323 1784 SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL) 1324 1785 }; 1325 1786 1787 + static const struct drm_exynos_ipp_limit gsc_5250_limits[] = { 1788 + { IPP_SIZE_LIMIT(BUFFER, .h = { 32, 4800, 8 }, .v = { 16, 3344, 8 }) }, 1789 + { IPP_SIZE_LIMIT(AREA, .h = { 16, 4800, 2 }, .v = { 8, 3344, 2 }) }, 1790 + { IPP_SIZE_LIMIT(ROTATED, .h = { 32, 2048 }, .v = { 16, 2048 }) }, 1791 + { IPP_SCALE_LIMIT(.h = { (1 << 16) / 16, (1 << 16) * 8 }, 1792 + .v = { (1 << 16) / 16, (1 << 16) * 8 }) }, 1793 + }; 1794 + 1795 + static const struct drm_exynos_ipp_limit gsc_5420_limits[] = { 1796 + { IPP_SIZE_LIMIT(BUFFER, .h = { 32, 4800, 8 }, .v = { 16, 3344, 8 }) }, 1797 + { IPP_SIZE_LIMIT(AREA, .h = { 16, 4800, 2 }, .v = { 8, 3344, 2 }) }, 1798 + { IPP_SIZE_LIMIT(ROTATED, .h = { 16, 2016 }, .v = { 8, 2016 }) }, 1799 + { IPP_SCALE_LIMIT(.h = { (1 << 16) / 16, (1 << 16) * 8 }, 1800 + .v = { (1 << 16) / 16, (1 << 16) * 8 }) }, 1801 + }; 1802 + 1803 + static const struct drm_exynos_ipp_limit gsc_5433_limits[] = { 1804 + { IPP_SIZE_LIMIT(BUFFER, .h = { 32, 8191, 2 }, .v = { 16, 8191, 2 }) }, 1805 + { IPP_SIZE_LIMIT(AREA, .h = { 16, 4800, 1 }, .v = { 8, 3344, 1 }) }, 1806 + { IPP_SIZE_LIMIT(ROTATED, .h = { 32, 2047 }, .v = { 8, 8191 }) }, 1807 + { IPP_SCALE_LIMIT(.h = { (1 << 16) / 16, (1 << 16) * 8 }, 1808 + .v = { (1 << 16) / 16, (1 << 16) * 8 }) }, 1809 + }; 1810 + 1811 + static struct gsc_driverdata gsc_exynos5250_drvdata = { 1812 + .clk_names = {"gscl"}, 1813 + .num_clocks = 1, 1814 + .limits = gsc_5250_limits, 1815 + .num_limits = ARRAY_SIZE(gsc_5250_limits), 1816 + }; 1817 + 1818 + static struct gsc_driverdata gsc_exynos5420_drvdata = { 1819 + .clk_names = {"gscl"}, 1820 + .num_clocks = 1, 1821 + .limits = gsc_5420_limits, 1822 + .num_limits = ARRAY_SIZE(gsc_5420_limits), 1823 + }; 1824 + 1825 + static struct gsc_driverdata gsc_exynos5433_drvdata = { 1826 + .clk_names = {"pclk", "aclk", "aclk_xiu", "aclk_gsclbend"}, 1827 + .num_clocks = 4, 1828 + .limits 
= gsc_5433_limits, 1829 + .num_limits = ARRAY_SIZE(gsc_5433_limits), 1830 + }; 1831 + 1326 1832 static const struct of_device_id exynos_drm_gsc_of_match[] = { 1327 - { .compatible = "samsung,exynos5-gsc" }, 1328 - { }, 1833 + { 1834 + .compatible = "samsung,exynos5-gsc", 1835 + .data = &gsc_exynos5250_drvdata, 1836 + }, { 1837 + .compatible = "samsung,exynos5250-gsc", 1838 + .data = &gsc_exynos5250_drvdata, 1839 + }, { 1840 + .compatible = "samsung,exynos5420-gsc", 1841 + .data = &gsc_exynos5420_drvdata, 1842 + }, { 1843 + .compatible = "samsung,exynos5433-gsc", 1844 + .data = &gsc_exynos5433_drvdata, 1845 + }, { 1846 + }, 1329 1847 }; 1330 1848 MODULE_DEVICE_TABLE(of, exynos_drm_gsc_of_match); 1331 1849 ··· 1396 1800 .of_match_table = of_match_ptr(exynos_drm_gsc_of_match), 1397 1801 }, 1398 1802 }; 1399 -
-24
drivers/gpu/drm/exynos/exynos_drm_gsc.h
··· 1 - /* 2 - * Copyright (c) 2012 Samsung Electronics Co., Ltd. 3 - * 4 - * Authors: 5 - * Eunchul Kim <chulspro.kim@samsung.com> 6 - * Jinyoung Jeon <jy0.jeon@samsung.com> 7 - * Sangmin Lee <lsmin.lee@samsung.com> 8 - * 9 - * This program is free software; you can redistribute it and/or modify it 10 - * under the terms of the GNU General Public License as published by the 11 - * Free Software Foundation; either version 2 of the License, or (at your 12 - * option) any later version. 13 - */ 14 - 15 - #ifndef _EXYNOS_DRM_GSC_H_ 16 - #define _EXYNOS_DRM_GSC_H_ 17 - 18 - /* 19 - * TODO 20 - * FIMD output interface notifier callback. 21 - * Mixer output interface notifier callback. 22 - */ 23 - 24 - #endif /* _EXYNOS_DRM_GSC_H_ */
+916
drivers/gpu/drm/exynos/exynos_drm_ipp.c
··· 1 + /* 2 + * Copyright (C) 2017 Samsung Electronics Co.Ltd 3 + * Authors: 4 + * Marek Szyprowski <m.szyprowski@samsung.com> 5 + * 6 + * Exynos DRM Image Post Processing (IPP) related functions 7 + * 8 + * Permission is hereby granted, free of charge, to any person obtaining a 9 + * copy of this software and associated documentation files (the "Software"), 10 + * to deal in the Software without restriction, including without limitation 11 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 12 + * and/or sell copies of the Software, and to permit persons to whom the 13 + * Software is furnished to do so, subject to the following conditions: 14 + * 15 + * The above copyright notice and this permission notice shall be included in 16 + * all copies or substantial portions of the Software. 17 + */ 18 + 19 + 20 + #include <drm/drmP.h> 21 + #include <drm/drm_mode.h> 22 + #include <uapi/drm/exynos_drm.h> 23 + 24 + #include "exynos_drm_drv.h" 25 + #include "exynos_drm_gem.h" 26 + #include "exynos_drm_ipp.h" 27 + 28 + static int num_ipp; 29 + static LIST_HEAD(ipp_list); 30 + 31 + /** 32 + * exynos_drm_ipp_register - Register a new picture processor hardware module 33 + * @dev: DRM device 34 + * @ipp: ipp module to init 35 + * @funcs: callbacks for the new ipp object 36 + * @caps: bitmask of ipp capabilities (%DRM_EXYNOS_IPP_CAP_*) 37 + * @formats: array of supported formats 38 + * @num_formats: size of the supported formats array 39 + * @name: name (for debugging purposes) 40 + * 41 + * Initializes a ipp module. 42 + * 43 + * Returns: 44 + * Zero on success, error code on failure. 
45 + */ 46 + int exynos_drm_ipp_register(struct drm_device *dev, struct exynos_drm_ipp *ipp, 47 + const struct exynos_drm_ipp_funcs *funcs, unsigned int caps, 48 + const struct exynos_drm_ipp_formats *formats, 49 + unsigned int num_formats, const char *name) 50 + { 51 + WARN_ON(!ipp); 52 + WARN_ON(!funcs); 53 + WARN_ON(!formats); 54 + WARN_ON(!num_formats); 55 + 56 + spin_lock_init(&ipp->lock); 57 + INIT_LIST_HEAD(&ipp->todo_list); 58 + init_waitqueue_head(&ipp->done_wq); 59 + ipp->dev = dev; 60 + ipp->funcs = funcs; 61 + ipp->capabilities = caps; 62 + ipp->name = name; 63 + ipp->formats = formats; 64 + ipp->num_formats = num_formats; 65 + 66 + /* ipp_list modification is serialized by component framework */ 67 + list_add_tail(&ipp->head, &ipp_list); 68 + ipp->id = num_ipp++; 69 + 70 + DRM_DEBUG_DRIVER("Registered ipp %d\n", ipp->id); 71 + 72 + return 0; 73 + } 74 + 75 + /** 76 + * exynos_drm_ipp_unregister - Unregister the picture processor module 77 + * @dev: DRM device 78 + * @ipp: ipp module 79 + */ 80 + void exynos_drm_ipp_unregister(struct drm_device *dev, 81 + struct exynos_drm_ipp *ipp) 82 + { 83 + WARN_ON(ipp->task); 84 + WARN_ON(!list_empty(&ipp->todo_list)); 85 + list_del(&ipp->head); 86 + } 87 + 88 + /** 89 + * exynos_drm_ipp_ioctl_get_res_ioctl - enumerate all ipp modules 90 + * @dev: DRM device 91 + * @data: ioctl data 92 + * @file_priv: DRM file info 93 + * 94 + * Construct a list of ipp ids. 95 + * 96 + * Called by the user via ioctl. 97 + * 98 + * Returns: 99 + * Zero on success, negative errno on failure. 
100 + */ 101 + int exynos_drm_ipp_get_res_ioctl(struct drm_device *dev, void *data, 102 + struct drm_file *file_priv) 103 + { 104 + struct drm_exynos_ioctl_ipp_get_res *resp = data; 105 + struct exynos_drm_ipp *ipp; 106 + uint32_t __user *ipp_ptr = (uint32_t __user *) 107 + (unsigned long)resp->ipp_id_ptr; 108 + unsigned int count = num_ipp, copied = 0; 109 + 110 + /* 111 + * This ioctl is called twice, once to determine how much space is 112 + * needed, and the 2nd time to fill it. 113 + */ 114 + if (count && resp->count_ipps >= count) { 115 + list_for_each_entry(ipp, &ipp_list, head) { 116 + if (put_user(ipp->id, ipp_ptr + copied)) 117 + return -EFAULT; 118 + copied++; 119 + } 120 + } 121 + resp->count_ipps = count; 122 + 123 + return 0; 124 + } 125 + 126 + static inline struct exynos_drm_ipp *__ipp_get(uint32_t id) 127 + { 128 + struct exynos_drm_ipp *ipp; 129 + 130 + list_for_each_entry(ipp, &ipp_list, head) 131 + if (ipp->id == id) 132 + return ipp; 133 + return NULL; 134 + } 135 + 136 + /** 137 + * exynos_drm_ipp_ioctl_get_caps - get ipp module capabilities and formats 138 + * @dev: DRM device 139 + * @data: ioctl data 140 + * @file_priv: DRM file info 141 + * 142 + * Construct a structure describing ipp module capabilities. 143 + * 144 + * Called by the user via ioctl. 145 + * 146 + * Returns: 147 + * Zero on success, negative errno on failure. 148 + */ 149 + int exynos_drm_ipp_get_caps_ioctl(struct drm_device *dev, void *data, 150 + struct drm_file *file_priv) 151 + { 152 + struct drm_exynos_ioctl_ipp_get_caps *resp = data; 153 + void __user *ptr = (void __user *)(unsigned long)resp->formats_ptr; 154 + struct exynos_drm_ipp *ipp; 155 + int i; 156 + 157 + ipp = __ipp_get(resp->ipp_id); 158 + if (!ipp) 159 + return -ENOENT; 160 + 161 + resp->ipp_id = ipp->id; 162 + resp->capabilities = ipp->capabilities; 163 + 164 + /* 165 + * This ioctl is called twice, once to determine how much space is 166 + * needed, and the 2nd time to fill it. 
167 + */ 168 + if (resp->formats_count >= ipp->num_formats) { 169 + for (i = 0; i < ipp->num_formats; i++) { 170 + struct drm_exynos_ipp_format tmp = { 171 + .fourcc = ipp->formats[i].fourcc, 172 + .type = ipp->formats[i].type, 173 + .modifier = ipp->formats[i].modifier, 174 + }; 175 + 176 + if (copy_to_user(ptr, &tmp, sizeof(tmp))) 177 + return -EFAULT; 178 + ptr += sizeof(tmp); 179 + } 180 + } 181 + resp->formats_count = ipp->num_formats; 182 + 183 + return 0; 184 + } 185 + 186 + static inline const struct exynos_drm_ipp_formats *__ipp_format_get( 187 + struct exynos_drm_ipp *ipp, uint32_t fourcc, 188 + uint64_t mod, unsigned int type) 189 + { 190 + int i; 191 + 192 + for (i = 0; i < ipp->num_formats; i++) { 193 + if ((ipp->formats[i].type & type) && 194 + ipp->formats[i].fourcc == fourcc && 195 + ipp->formats[i].modifier == mod) 196 + return &ipp->formats[i]; 197 + } 198 + return NULL; 199 + } 200 + 201 + /** 202 + * exynos_drm_ipp_get_limits_ioctl - get ipp module limits 203 + * @dev: DRM device 204 + * @data: ioctl data 205 + * @file_priv: DRM file info 206 + * 207 + * Construct a structure describing ipp module limitations for provided 208 + * picture format. 209 + * 210 + * Called by the user via ioctl. 211 + * 212 + * Returns: 213 + * Zero on success, negative errno on failure. 
214 + */ 215 + int exynos_drm_ipp_get_limits_ioctl(struct drm_device *dev, void *data, 216 + struct drm_file *file_priv) 217 + { 218 + struct drm_exynos_ioctl_ipp_get_limits *resp = data; 219 + void __user *ptr = (void __user *)(unsigned long)resp->limits_ptr; 220 + const struct exynos_drm_ipp_formats *format; 221 + struct exynos_drm_ipp *ipp; 222 + 223 + if (resp->type != DRM_EXYNOS_IPP_FORMAT_SOURCE && 224 + resp->type != DRM_EXYNOS_IPP_FORMAT_DESTINATION) 225 + return -EINVAL; 226 + 227 + ipp = __ipp_get(resp->ipp_id); 228 + if (!ipp) 229 + return -ENOENT; 230 + 231 + format = __ipp_format_get(ipp, resp->fourcc, resp->modifier, 232 + resp->type); 233 + if (!format) 234 + return -EINVAL; 235 + 236 + /* 237 + * This ioctl is called twice, once to determine how much space is 238 + * needed, and the 2nd time to fill it. 239 + */ 240 + if (format->num_limits && resp->limits_count >= format->num_limits) 241 + if (copy_to_user((void __user *)ptr, format->limits, 242 + sizeof(*format->limits) * format->num_limits)) 243 + return -EFAULT; 244 + resp->limits_count = format->num_limits; 245 + 246 + return 0; 247 + } 248 + 249 + struct drm_pending_exynos_ipp_event { 250 + struct drm_pending_event base; 251 + struct drm_exynos_ipp_event event; 252 + }; 253 + 254 + static inline struct exynos_drm_ipp_task * 255 + exynos_drm_ipp_task_alloc(struct exynos_drm_ipp *ipp) 256 + { 257 + struct exynos_drm_ipp_task *task; 258 + 259 + task = kzalloc(sizeof(*task), GFP_KERNEL); 260 + if (!task) 261 + return NULL; 262 + 263 + task->dev = ipp->dev; 264 + task->ipp = ipp; 265 + 266 + /* some defaults */ 267 + task->src.rect.w = task->dst.rect.w = UINT_MAX; 268 + task->src.rect.h = task->dst.rect.h = UINT_MAX; 269 + task->transform.rotation = DRM_MODE_ROTATE_0; 270 + 271 + DRM_DEBUG_DRIVER("Allocated task %pK\n", task); 272 + 273 + return task; 274 + } 275 + 276 + static const struct exynos_drm_param_map { 277 + unsigned int id; 278 + unsigned int size; 279 + unsigned int offset; 280 + } 
exynos_drm_ipp_params_maps[] = { 281 + { 282 + DRM_EXYNOS_IPP_TASK_BUFFER | DRM_EXYNOS_IPP_TASK_TYPE_SOURCE, 283 + sizeof(struct drm_exynos_ipp_task_buffer), 284 + offsetof(struct exynos_drm_ipp_task, src.buf), 285 + }, { 286 + DRM_EXYNOS_IPP_TASK_BUFFER | 287 + DRM_EXYNOS_IPP_TASK_TYPE_DESTINATION, 288 + sizeof(struct drm_exynos_ipp_task_buffer), 289 + offsetof(struct exynos_drm_ipp_task, dst.buf), 290 + }, { 291 + DRM_EXYNOS_IPP_TASK_RECTANGLE | DRM_EXYNOS_IPP_TASK_TYPE_SOURCE, 292 + sizeof(struct drm_exynos_ipp_task_rect), 293 + offsetof(struct exynos_drm_ipp_task, src.rect), 294 + }, { 295 + DRM_EXYNOS_IPP_TASK_RECTANGLE | 296 + DRM_EXYNOS_IPP_TASK_TYPE_DESTINATION, 297 + sizeof(struct drm_exynos_ipp_task_rect), 298 + offsetof(struct exynos_drm_ipp_task, dst.rect), 299 + }, { 300 + DRM_EXYNOS_IPP_TASK_TRANSFORM, 301 + sizeof(struct drm_exynos_ipp_task_transform), 302 + offsetof(struct exynos_drm_ipp_task, transform), 303 + }, { 304 + DRM_EXYNOS_IPP_TASK_ALPHA, 305 + sizeof(struct drm_exynos_ipp_task_alpha), 306 + offsetof(struct exynos_drm_ipp_task, alpha), 307 + }, 308 + }; 309 + 310 + static int exynos_drm_ipp_task_set(struct exynos_drm_ipp_task *task, 311 + struct drm_exynos_ioctl_ipp_commit *arg) 312 + { 313 + const struct exynos_drm_param_map *map = exynos_drm_ipp_params_maps; 314 + void __user *params = (void __user *)(unsigned long)arg->params_ptr; 315 + unsigned int size = arg->params_size; 316 + uint32_t id; 317 + int i; 318 + 319 + while (size) { 320 + if (get_user(id, (uint32_t __user *)params)) 321 + return -EFAULT; 322 + 323 + for (i = 0; i < ARRAY_SIZE(exynos_drm_ipp_params_maps); i++) 324 + if (map[i].id == id) 325 + break; 326 + if (i == ARRAY_SIZE(exynos_drm_ipp_params_maps) || 327 + map[i].size > size) 328 + return -EINVAL; 329 + 330 + if (copy_from_user((void *)task + map[i].offset, params, 331 + map[i].size)) 332 + return -EFAULT; 333 + 334 + params += map[i].size; 335 + size -= map[i].size; 336 + } 337 + 338 + DRM_DEBUG_DRIVER("Got task %pK 
configuration from userspace\n", task); 339 + return 0; 340 + } 341 + 342 + static int exynos_drm_ipp_task_setup_buffer(struct exynos_drm_ipp_buffer *buf, 343 + struct drm_file *filp) 344 + { 345 + int ret = 0; 346 + int i; 347 + 348 + /* basic checks */ 349 + if (buf->buf.width == 0 || buf->buf.height == 0) 350 + return -EINVAL; 351 + buf->format = drm_format_info(buf->buf.fourcc); 352 + for (i = 0; i < buf->format->num_planes; i++) { 353 + unsigned int width = (i == 0) ? buf->buf.width : 354 + DIV_ROUND_UP(buf->buf.width, buf->format->hsub); 355 + 356 + if (buf->buf.pitch[i] == 0) 357 + buf->buf.pitch[i] = width * buf->format->cpp[i]; 358 + if (buf->buf.pitch[i] < width * buf->format->cpp[i]) 359 + return -EINVAL; 360 + if (!buf->buf.gem_id[i]) 361 + return -ENOENT; 362 + } 363 + 364 + /* pitch for additional planes must match */ 365 + if (buf->format->num_planes > 2 && 366 + buf->buf.pitch[1] != buf->buf.pitch[2]) 367 + return -EINVAL; 368 + 369 + /* get GEM buffers and check their size */ 370 + for (i = 0; i < buf->format->num_planes; i++) { 371 + unsigned int height = (i == 0) ? 
buf->buf.height : 372 + DIV_ROUND_UP(buf->buf.height, buf->format->vsub); 373 + unsigned long size = height * buf->buf.pitch[i]; 374 + struct drm_gem_object *obj = drm_gem_object_lookup(filp, 375 + buf->buf.gem_id[i]); 376 + if (!obj) { 377 + ret = -ENOENT; 378 + goto gem_free; 379 + } 380 + buf->exynos_gem[i] = to_exynos_gem(obj); 381 + 382 + if (size + buf->buf.offset[i] > buf->exynos_gem[i]->size) { 383 + i++; 384 + ret = -EINVAL; 385 + goto gem_free; 386 + } 387 + buf->dma_addr[i] = buf->exynos_gem[i]->dma_addr + 388 + buf->buf.offset[i]; 389 + } 390 + 391 + return 0; 392 + gem_free: 393 + while (i--) { 394 + drm_gem_object_put_unlocked(&buf->exynos_gem[i]->base); 395 + buf->exynos_gem[i] = NULL; 396 + } 397 + return ret; 398 + } 399 + 400 + static void exynos_drm_ipp_task_release_buf(struct exynos_drm_ipp_buffer *buf) 401 + { 402 + int i; 403 + 404 + if (!buf->exynos_gem[0]) 405 + return; 406 + for (i = 0; i < buf->format->num_planes; i++) 407 + drm_gem_object_put_unlocked(&buf->exynos_gem[i]->base); 408 + } 409 + 410 + static void exynos_drm_ipp_task_free(struct exynos_drm_ipp *ipp, 411 + struct exynos_drm_ipp_task *task) 412 + { 413 + DRM_DEBUG_DRIVER("Freeing task %pK\n", task); 414 + 415 + exynos_drm_ipp_task_release_buf(&task->src); 416 + exynos_drm_ipp_task_release_buf(&task->dst); 417 + if (task->event) 418 + drm_event_cancel_free(ipp->dev, &task->event->base); 419 + kfree(task); 420 + } 421 + 422 + struct drm_ipp_limit { 423 + struct drm_exynos_ipp_limit_val h; 424 + struct drm_exynos_ipp_limit_val v; 425 + }; 426 + 427 + enum drm_ipp_size_id { 428 + IPP_LIMIT_BUFFER, IPP_LIMIT_AREA, IPP_LIMIT_ROTATED, IPP_LIMIT_MAX 429 + }; 430 + 431 + static const enum drm_ipp_size_id limit_id_fallback[IPP_LIMIT_MAX][4] = { 432 + [IPP_LIMIT_BUFFER] = { DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER }, 433 + [IPP_LIMIT_AREA] = { DRM_EXYNOS_IPP_LIMIT_SIZE_AREA, 434 + DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER }, 435 + [IPP_LIMIT_ROTATED] = { DRM_EXYNOS_IPP_LIMIT_SIZE_ROTATED, 436 + 
DRM_EXYNOS_IPP_LIMIT_SIZE_AREA, 437 + DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER }, 438 + }; 439 + 440 + static inline void __limit_set_val(unsigned int *ptr, unsigned int val) 441 + { 442 + if (!*ptr) 443 + *ptr = val; 444 + } 445 + 446 + static void __get_size_limit(const struct drm_exynos_ipp_limit *limits, 447 + unsigned int num_limits, enum drm_ipp_size_id id, 448 + struct drm_ipp_limit *res) 449 + { 450 + const struct drm_exynos_ipp_limit *l = limits; 451 + int i = 0; 452 + 453 + memset(res, 0, sizeof(*res)); 454 + for (i = 0; limit_id_fallback[id][i]; i++) 455 + for (l = limits; l - limits < num_limits; l++) { 456 + if (((l->type & DRM_EXYNOS_IPP_LIMIT_TYPE_MASK) != 457 + DRM_EXYNOS_IPP_LIMIT_TYPE_SIZE) || 458 + ((l->type & DRM_EXYNOS_IPP_LIMIT_SIZE_MASK) != 459 + limit_id_fallback[id][i])) 460 + continue; 461 + __limit_set_val(&res->h.min, l->h.min); 462 + __limit_set_val(&res->h.max, l->h.max); 463 + __limit_set_val(&res->h.align, l->h.align); 464 + __limit_set_val(&res->v.min, l->v.min); 465 + __limit_set_val(&res->v.max, l->v.max); 466 + __limit_set_val(&res->v.align, l->v.align); 467 + } 468 + } 469 + 470 + static inline bool __align_check(unsigned int val, unsigned int align) 471 + { 472 + if (align && (val & (align - 1))) { 473 + DRM_DEBUG_DRIVER("Value %d exceeds HW limits (align %d)\n", 474 + val, align); 475 + return false; 476 + } 477 + return true; 478 + } 479 + 480 + static inline bool __size_limit_check(unsigned int val, 481 + struct drm_exynos_ipp_limit_val *l) 482 + { 483 + if ((l->min && val < l->min) || (l->max && val > l->max)) { 484 + DRM_DEBUG_DRIVER("Value %d exceeds HW limits (min %d, max %d)\n", 485 + val, l->min, l->max); 486 + return false; 487 + } 488 + return __align_check(val, l->align); 489 + } 490 + 491 + static int exynos_drm_ipp_check_size_limits(struct exynos_drm_ipp_buffer *buf, 492 + const struct drm_exynos_ipp_limit *limits, unsigned int num_limits, 493 + bool rotate, bool swap) 494 + { 495 + enum drm_ipp_size_id id = rotate ? 
IPP_LIMIT_ROTATED : IPP_LIMIT_AREA; 496 + struct drm_ipp_limit l; 497 + struct drm_exynos_ipp_limit_val *lh = &l.h, *lv = &l.v; 498 + 499 + if (!limits) 500 + return 0; 501 + 502 + __get_size_limit(limits, num_limits, IPP_LIMIT_BUFFER, &l); 503 + if (!__size_limit_check(buf->buf.width, &l.h) || 504 + !__size_limit_check(buf->buf.height, &l.v)) 505 + return -EINVAL; 506 + 507 + if (swap) { 508 + lv = &l.h; 509 + lh = &l.v; 510 + } 511 + __get_size_limit(limits, num_limits, id, &l); 512 + if (!__size_limit_check(buf->rect.w, lh) || 513 + !__align_check(buf->rect.x, lh->align) || 514 + !__size_limit_check(buf->rect.h, lv) || 515 + !__align_check(buf->rect.y, lv->align)) 516 + return -EINVAL; 517 + 518 + return 0; 519 + } 520 + 521 + static inline bool __scale_limit_check(unsigned int src, unsigned int dst, 522 + unsigned int min, unsigned int max) 523 + { 524 + if ((max && (dst << 16) > src * max) || 525 + (min && (dst << 16) < src * min)) { 526 + DRM_DEBUG_DRIVER("Scale from %d to %d exceeds HW limits (ratio min %d.%05d, max %d.%05d)\n", 527 + src, dst, 528 + min >> 16, 100000 * (min & 0xffff) / (1 << 16), 529 + max >> 16, 100000 * (max & 0xffff) / (1 << 16)); 530 + return false; 531 + } 532 + return true; 533 + } 534 + 535 + static int exynos_drm_ipp_check_scale_limits( 536 + struct drm_exynos_ipp_task_rect *src, 537 + struct drm_exynos_ipp_task_rect *dst, 538 + const struct drm_exynos_ipp_limit *limits, 539 + unsigned int num_limits, bool swap) 540 + { 541 + const struct drm_exynos_ipp_limit_val *lh, *lv; 542 + int dw, dh; 543 + 544 + for (; num_limits; limits++, num_limits--) 545 + if ((limits->type & DRM_EXYNOS_IPP_LIMIT_TYPE_MASK) == 546 + DRM_EXYNOS_IPP_LIMIT_TYPE_SCALE) 547 + break; 548 + if (!num_limits) 549 + return 0; 550 + 551 + lh = (!swap) ? &limits->h : &limits->v; 552 + lv = (!swap) ? &limits->v : &limits->h; 553 + dw = (!swap) ? dst->w : dst->h; 554 + dh = (!swap) ? 
dst->h : dst->w; 555 + 556 + if (!__scale_limit_check(src->w, dw, lh->min, lh->max) || 557 + !__scale_limit_check(src->h, dh, lv->min, lv->max)) 558 + return -EINVAL; 559 + 560 + return 0; 561 + } 562 + 563 + static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task) 564 + { 565 + struct exynos_drm_ipp *ipp = task->ipp; 566 + const struct exynos_drm_ipp_formats *src_fmt, *dst_fmt; 567 + struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst; 568 + unsigned int rotation = task->transform.rotation; 569 + int ret = 0; 570 + bool swap = drm_rotation_90_or_270(rotation); 571 + bool rotate = (rotation != DRM_MODE_ROTATE_0); 572 + bool scale = false; 573 + 574 + DRM_DEBUG_DRIVER("Checking task %pK\n", task); 575 + 576 + if (src->rect.w == UINT_MAX) 577 + src->rect.w = src->buf.width; 578 + if (src->rect.h == UINT_MAX) 579 + src->rect.h = src->buf.height; 580 + if (dst->rect.w == UINT_MAX) 581 + dst->rect.w = dst->buf.width; 582 + if (dst->rect.h == UINT_MAX) 583 + dst->rect.h = dst->buf.height; 584 + 585 + if (src->rect.x + src->rect.w > (src->buf.width) || 586 + src->rect.y + src->rect.h > (src->buf.height) || 587 + dst->rect.x + dst->rect.w > (dst->buf.width) || 588 + dst->rect.y + dst->rect.h > (dst->buf.height)) { 589 + DRM_DEBUG_DRIVER("Task %pK: defined area is outside provided buffers\n", 590 + task); 591 + return -EINVAL; 592 + } 593 + 594 + if ((!swap && (src->rect.w != dst->rect.w || 595 + src->rect.h != dst->rect.h)) || 596 + (swap && (src->rect.w != dst->rect.h || 597 + src->rect.h != dst->rect.w))) 598 + scale = true; 599 + 600 + if ((!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_CROP) && 601 + (src->rect.x || src->rect.y || dst->rect.x || dst->rect.y)) || 602 + (!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_ROTATE) && rotate) || 603 + (!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_SCALE) && scale) || 604 + (!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_CONVERT) && 605 + src->buf.fourcc != dst->buf.fourcc)) { 606 + DRM_DEBUG_DRIVER("Task %pK: hw 
capabilities exceeded\n", task); 607 + return -EINVAL; 608 + } 609 + 610 + src_fmt = __ipp_format_get(ipp, src->buf.fourcc, src->buf.modifier, 611 + DRM_EXYNOS_IPP_FORMAT_SOURCE); 612 + if (!src_fmt) { 613 + DRM_DEBUG_DRIVER("Task %pK: src format not supported\n", task); 614 + return -EINVAL; 615 + } 616 + ret = exynos_drm_ipp_check_size_limits(src, src_fmt->limits, 617 + src_fmt->num_limits, 618 + rotate, false); 619 + if (ret) 620 + return ret; 621 + ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect, 622 + src_fmt->limits, 623 + src_fmt->num_limits, swap); 624 + if (ret) 625 + return ret; 626 + 627 + dst_fmt = __ipp_format_get(ipp, dst->buf.fourcc, dst->buf.modifier, 628 + DRM_EXYNOS_IPP_FORMAT_DESTINATION); 629 + if (!dst_fmt) { 630 + DRM_DEBUG_DRIVER("Task %pK: dst format not supported\n", task); 631 + return -EINVAL; 632 + } 633 + ret = exynos_drm_ipp_check_size_limits(dst, dst_fmt->limits, 634 + dst_fmt->num_limits, 635 + false, swap); 636 + if (ret) 637 + return ret; 638 + ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect, 639 + dst_fmt->limits, 640 + dst_fmt->num_limits, swap); 641 + if (ret) 642 + return ret; 643 + 644 + DRM_DEBUG_DRIVER("Task %pK: all checks done.\n", task); 645 + 646 + return ret; 647 + } 648 + 649 + static int exynos_drm_ipp_task_setup_buffers(struct exynos_drm_ipp_task *task, 650 + struct drm_file *filp) 651 + { 652 + struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst; 653 + int ret = 0; 654 + 655 + DRM_DEBUG_DRIVER("Setting buffer for task %pK\n", task); 656 + 657 + ret = exynos_drm_ipp_task_setup_buffer(src, filp); 658 + if (ret) { 659 + DRM_DEBUG_DRIVER("Task %pK: src buffer setup failed\n", task); 660 + return ret; 661 + } 662 + ret = exynos_drm_ipp_task_setup_buffer(dst, filp); 663 + if (ret) { 664 + DRM_DEBUG_DRIVER("Task %pK: dst buffer setup failed\n", task); 665 + return ret; 666 + } 667 + 668 + DRM_DEBUG_DRIVER("Task %pK: buffers prepared.\n", task); 669 + 670 + return ret; 671 + } 672 
+ 673 + 674 + static int exynos_drm_ipp_event_create(struct exynos_drm_ipp_task *task, 675 + struct drm_file *file_priv, uint64_t user_data) 676 + { 677 + struct drm_pending_exynos_ipp_event *e = NULL; 678 + int ret; 679 + 680 + e = kzalloc(sizeof(*e), GFP_KERNEL); 681 + if (!e) 682 + return -ENOMEM; 683 + 684 + e->event.base.type = DRM_EXYNOS_IPP_EVENT; 685 + e->event.base.length = sizeof(e->event); 686 + e->event.user_data = user_data; 687 + 688 + ret = drm_event_reserve_init(task->dev, file_priv, &e->base, 689 + &e->event.base); 690 + if (ret) 691 + goto free; 692 + 693 + task->event = e; 694 + return 0; 695 + free: 696 + kfree(e); 697 + return ret; 698 + } 699 + 700 + static void exynos_drm_ipp_event_send(struct exynos_drm_ipp_task *task) 701 + { 702 + struct timespec64 now; 703 + 704 + ktime_get_ts64(&now); 705 + task->event->event.tv_sec = now.tv_sec; 706 + task->event->event.tv_usec = now.tv_nsec / NSEC_PER_USEC; 707 + task->event->event.sequence = atomic_inc_return(&task->ipp->sequence); 708 + 709 + drm_send_event(task->dev, &task->event->base); 710 + } 711 + 712 + static int exynos_drm_ipp_task_cleanup(struct exynos_drm_ipp_task *task) 713 + { 714 + int ret = task->ret; 715 + 716 + if (ret == 0 && task->event) { 717 + exynos_drm_ipp_event_send(task); 718 + /* ensure event won't be canceled on task free */ 719 + task->event = NULL; 720 + } 721 + 722 + exynos_drm_ipp_task_free(task->ipp, task); 723 + return ret; 724 + } 725 + 726 + static void exynos_drm_ipp_cleanup_work(struct work_struct *work) 727 + { 728 + struct exynos_drm_ipp_task *task = container_of(work, 729 + struct exynos_drm_ipp_task, cleanup_work); 730 + 731 + exynos_drm_ipp_task_cleanup(task); 732 + } 733 + 734 + static void exynos_drm_ipp_next_task(struct exynos_drm_ipp *ipp); 735 + 736 + /** 737 + * exynos_drm_ipp_task_done - finish given task and set return code 738 + * @task: ipp task to finish 739 + * @ret: error code or 0 if operation has been performed successfully 740 + */ 741 + void 
exynos_drm_ipp_task_done(struct exynos_drm_ipp_task *task, int ret) 742 + { 743 + struct exynos_drm_ipp *ipp = task->ipp; 744 + unsigned long flags; 745 + 746 + DRM_DEBUG_DRIVER("ipp: %d, task %pK done: %d\n", ipp->id, task, ret); 747 + 748 + spin_lock_irqsave(&ipp->lock, flags); 749 + if (ipp->task == task) 750 + ipp->task = NULL; 751 + task->flags |= DRM_EXYNOS_IPP_TASK_DONE; 752 + task->ret = ret; 753 + spin_unlock_irqrestore(&ipp->lock, flags); 754 + 755 + exynos_drm_ipp_next_task(ipp); 756 + wake_up(&ipp->done_wq); 757 + 758 + if (task->flags & DRM_EXYNOS_IPP_TASK_ASYNC) { 759 + INIT_WORK(&task->cleanup_work, exynos_drm_ipp_cleanup_work); 760 + schedule_work(&task->cleanup_work); 761 + } 762 + } 763 + 764 + static void exynos_drm_ipp_next_task(struct exynos_drm_ipp *ipp) 765 + { 766 + struct exynos_drm_ipp_task *task; 767 + unsigned long flags; 768 + int ret; 769 + 770 + DRM_DEBUG_DRIVER("ipp: %d, try to run new task\n", ipp->id); 771 + 772 + spin_lock_irqsave(&ipp->lock, flags); 773 + 774 + if (ipp->task || list_empty(&ipp->todo_list)) { 775 + spin_unlock_irqrestore(&ipp->lock, flags); 776 + return; 777 + } 778 + 779 + task = list_first_entry(&ipp->todo_list, struct exynos_drm_ipp_task, 780 + head); 781 + list_del_init(&task->head); 782 + ipp->task = task; 783 + 784 + spin_unlock_irqrestore(&ipp->lock, flags); 785 + 786 + DRM_DEBUG_DRIVER("ipp: %d, selected task %pK to run\n", ipp->id, task); 787 + 788 + ret = ipp->funcs->commit(ipp, task); 789 + if (ret) 790 + exynos_drm_ipp_task_done(task, ret); 791 + } 792 + 793 + static void exynos_drm_ipp_schedule_task(struct exynos_drm_ipp *ipp, 794 + struct exynos_drm_ipp_task *task) 795 + { 796 + unsigned long flags; 797 + 798 + spin_lock_irqsave(&ipp->lock, flags); 799 + list_add(&task->head, &ipp->todo_list); 800 + spin_unlock_irqrestore(&ipp->lock, flags); 801 + 802 + exynos_drm_ipp_next_task(ipp); 803 + } 804 + 805 + static void exynos_drm_ipp_task_abort(struct exynos_drm_ipp *ipp, 806 + struct exynos_drm_ipp_task 
*task) 807 + { 808 + unsigned long flags; 809 + 810 + spin_lock_irqsave(&ipp->lock, flags); 811 + if (task->flags & DRM_EXYNOS_IPP_TASK_DONE) { 812 + /* already completed task */ 813 + exynos_drm_ipp_task_cleanup(task); 814 + } else if (ipp->task != task) { 815 + /* task has not been scheduled for execution yet */ 816 + list_del_init(&task->head); 817 + exynos_drm_ipp_task_cleanup(task); 818 + } else { 819 + /* 820 + * currently processed task, call abort() and perform 821 + * cleanup with async worker 822 + */ 823 + task->flags |= DRM_EXYNOS_IPP_TASK_ASYNC; 824 + spin_unlock_irqrestore(&ipp->lock, flags); 825 + if (ipp->funcs->abort) 826 + ipp->funcs->abort(ipp, task); 827 + return; 828 + } 829 + spin_unlock_irqrestore(&ipp->lock, flags); 830 + } 831 + 832 + /** 833 + * exynos_drm_ipp_commit_ioctl - perform image processing operation 834 + * @dev: DRM device 835 + * @data: ioctl data 836 + * @file_priv: DRM file info 837 + * 838 + * Construct a ipp task from the set of properties provided from the user 839 + * and try to schedule it to framebuffer processor hardware. 840 + * 841 + * Called by the user via ioctl. 842 + * 843 + * Returns: 844 + * Zero on success, negative errno on failure. 
845 + */ 846 + int exynos_drm_ipp_commit_ioctl(struct drm_device *dev, void *data, 847 + struct drm_file *file_priv) 848 + { 849 + struct drm_exynos_ioctl_ipp_commit *arg = data; 850 + struct exynos_drm_ipp *ipp; 851 + struct exynos_drm_ipp_task *task; 852 + int ret = 0; 853 + 854 + if ((arg->flags & ~DRM_EXYNOS_IPP_FLAGS) || arg->reserved) 855 + return -EINVAL; 856 + 857 + /* can't test and expect an event at the same time */ 858 + if ((arg->flags & DRM_EXYNOS_IPP_FLAG_TEST_ONLY) && 859 + (arg->flags & DRM_EXYNOS_IPP_FLAG_EVENT)) 860 + return -EINVAL; 861 + 862 + ipp = __ipp_get(arg->ipp_id); 863 + if (!ipp) 864 + return -ENOENT; 865 + 866 + task = exynos_drm_ipp_task_alloc(ipp); 867 + if (!task) 868 + return -ENOMEM; 869 + 870 + ret = exynos_drm_ipp_task_set(task, arg); 871 + if (ret) 872 + goto free; 873 + 874 + ret = exynos_drm_ipp_task_check(task); 875 + if (ret) 876 + goto free; 877 + 878 + ret = exynos_drm_ipp_task_setup_buffers(task, file_priv); 879 + if (ret || arg->flags & DRM_EXYNOS_IPP_FLAG_TEST_ONLY) 880 + goto free; 881 + 882 + if (arg->flags & DRM_EXYNOS_IPP_FLAG_EVENT) { 883 + ret = exynos_drm_ipp_event_create(task, file_priv, 884 + arg->user_data); 885 + if (ret) 886 + goto free; 887 + } 888 + 889 + /* 890 + * Queue task for processing on the hardware. 
task object will be 891 + * then freed after exynos_drm_ipp_task_done() 892 + */ 893 + if (arg->flags & DRM_EXYNOS_IPP_FLAG_NONBLOCK) { 894 + DRM_DEBUG_DRIVER("ipp: %d, nonblocking processing task %pK\n", 895 + ipp->id, task); 896 + 897 + task->flags |= DRM_EXYNOS_IPP_TASK_ASYNC; 898 + exynos_drm_ipp_schedule_task(task->ipp, task); 899 + ret = 0; 900 + } else { 901 + DRM_DEBUG_DRIVER("ipp: %d, processing task %pK\n", ipp->id, 902 + task); 903 + exynos_drm_ipp_schedule_task(ipp, task); 904 + ret = wait_event_interruptible(ipp->done_wq, 905 + task->flags & DRM_EXYNOS_IPP_TASK_DONE); 906 + if (ret) 907 + exynos_drm_ipp_task_abort(ipp, task); 908 + else 909 + ret = exynos_drm_ipp_task_cleanup(task); 910 + } 911 + return ret; 912 + free: 913 + exynos_drm_ipp_task_free(ipp, task); 914 + 915 + return ret; 916 + }
+175
drivers/gpu/drm/exynos/exynos_drm_ipp.h
··· 1 + /* 2 + * Copyright (c) 2017 Samsung Electronics Co., Ltd. 3 + * 4 + * This program is free software; you can redistribute it and/or modify it 5 + * under the terms of the GNU General Public License as published by the 6 + * Free Software Foundation; either version 2 of the License, or (at your 7 + * option) any later version. 8 + */ 9 + 10 + #ifndef _EXYNOS_DRM_IPP_H_ 11 + #define _EXYNOS_DRM_IPP_H_ 12 + 13 + #include <drm/drmP.h> 14 + 15 + struct exynos_drm_ipp; 16 + struct exynos_drm_ipp_task; 17 + 18 + /** 19 + * struct exynos_drm_ipp_funcs - exynos_drm_ipp control functions 20 + */ 21 + struct exynos_drm_ipp_funcs { 22 + /** 23 + * @commit: 24 + * 25 + * This is the main entry point to start framebuffer processing 26 + * in the hardware. The exynos_drm_ipp_task has been already validated. 27 + * This function must not wait until the device finishes processing. 28 + * When the driver finishes processing, it has to call 29 + * exynos_exynos_drm_ipp_task_done() function. 30 + * 31 + * RETURNS: 32 + * 33 + * 0 on success or negative error codes in case of failure. 34 + */ 35 + int (*commit)(struct exynos_drm_ipp *ipp, 36 + struct exynos_drm_ipp_task *task); 37 + 38 + /** 39 + * @abort: 40 + * 41 + * Informs the driver that it has to abort the currently running 42 + * task as soon as possible (i.e. as soon as it can stop the device 43 + * safely), even if the task would not have been finished by then. 44 + * After the driver performs the necessary steps, it has to call 45 + * exynos_drm_ipp_task_done() (as if the task ended normally). 46 + * This function does not have to (and will usually not) wait 47 + * until the device enters a state when it can be stopped. 
48 + */ 49 + void (*abort)(struct exynos_drm_ipp *ipp, 50 + struct exynos_drm_ipp_task *task); 51 + }; 52 + 53 + /** 54 + * struct exynos_drm_ipp - central picture processor module structure 55 + */ 56 + struct exynos_drm_ipp { 57 + struct drm_device *dev; 58 + struct list_head head; 59 + unsigned int id; 60 + 61 + const char *name; 62 + const struct exynos_drm_ipp_funcs *funcs; 63 + unsigned int capabilities; 64 + const struct exynos_drm_ipp_formats *formats; 65 + unsigned int num_formats; 66 + atomic_t sequence; 67 + 68 + spinlock_t lock; 69 + struct exynos_drm_ipp_task *task; 70 + struct list_head todo_list; 71 + wait_queue_head_t done_wq; 72 + }; 73 + 74 + struct exynos_drm_ipp_buffer { 75 + struct drm_exynos_ipp_task_buffer buf; 76 + struct drm_exynos_ipp_task_rect rect; 77 + 78 + struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER]; 79 + const struct drm_format_info *format; 80 + dma_addr_t dma_addr[MAX_FB_BUFFER]; 81 + }; 82 + 83 + /** 84 + * struct exynos_drm_ipp_task - a structure describing transformation that 85 + * has to be performed by the picture processor hardware module 86 + */ 87 + struct exynos_drm_ipp_task { 88 + struct drm_device *dev; 89 + struct exynos_drm_ipp *ipp; 90 + struct list_head head; 91 + 92 + struct exynos_drm_ipp_buffer src; 93 + struct exynos_drm_ipp_buffer dst; 94 + 95 + struct drm_exynos_ipp_task_transform transform; 96 + struct drm_exynos_ipp_task_alpha alpha; 97 + 98 + struct work_struct cleanup_work; 99 + unsigned int flags; 100 + int ret; 101 + 102 + struct drm_pending_exynos_ipp_event *event; 103 + }; 104 + 105 + #define DRM_EXYNOS_IPP_TASK_DONE (1 << 0) 106 + #define DRM_EXYNOS_IPP_TASK_ASYNC (1 << 1) 107 + 108 + struct exynos_drm_ipp_formats { 109 + uint32_t fourcc; 110 + uint32_t type; 111 + uint64_t modifier; 112 + const struct drm_exynos_ipp_limit *limits; 113 + unsigned int num_limits; 114 + }; 115 + 116 + /* helper macros to set exynos_drm_ipp_formats structure and limits*/ 117 + #define IPP_SRCDST_MFORMAT(f, m, l) \ 
118 + .fourcc = DRM_FORMAT_##f, .modifier = m, .limits = l, \ 119 + .num_limits = ARRAY_SIZE(l), \ 120 + .type = (DRM_EXYNOS_IPP_FORMAT_SOURCE | \ 121 + DRM_EXYNOS_IPP_FORMAT_DESTINATION) 122 + 123 + #define IPP_SRCDST_FORMAT(f, l) IPP_SRCDST_MFORMAT(f, 0, l) 124 + 125 + #define IPP_SIZE_LIMIT(l, val...) \ 126 + .type = (DRM_EXYNOS_IPP_LIMIT_TYPE_SIZE | \ 127 + DRM_EXYNOS_IPP_LIMIT_SIZE_##l), val 128 + 129 + #define IPP_SCALE_LIMIT(val...) \ 130 + .type = (DRM_EXYNOS_IPP_LIMIT_TYPE_SCALE), val 131 + 132 + int exynos_drm_ipp_register(struct drm_device *dev, struct exynos_drm_ipp *ipp, 133 + const struct exynos_drm_ipp_funcs *funcs, unsigned int caps, 134 + const struct exynos_drm_ipp_formats *formats, 135 + unsigned int num_formats, const char *name); 136 + void exynos_drm_ipp_unregister(struct drm_device *dev, 137 + struct exynos_drm_ipp *ipp); 138 + 139 + void exynos_drm_ipp_task_done(struct exynos_drm_ipp_task *task, int ret); 140 + 141 + #ifdef CONFIG_DRM_EXYNOS_IPP 142 + int exynos_drm_ipp_get_res_ioctl(struct drm_device *dev, void *data, 143 + struct drm_file *file_priv); 144 + int exynos_drm_ipp_get_caps_ioctl(struct drm_device *dev, void *data, 145 + struct drm_file *file_priv); 146 + int exynos_drm_ipp_get_limits_ioctl(struct drm_device *dev, void *data, 147 + struct drm_file *file_priv); 148 + int exynos_drm_ipp_commit_ioctl(struct drm_device *dev, 149 + void *data, struct drm_file *file_priv); 150 + #else 151 + static inline int exynos_drm_ipp_get_res_ioctl(struct drm_device *dev, 152 + void *data, struct drm_file *file_priv) 153 + { 154 + struct drm_exynos_ioctl_ipp_get_res *resp = data; 155 + 156 + resp->count_ipps = 0; 157 + return 0; 158 + } 159 + static inline int exynos_drm_ipp_get_caps_ioctl(struct drm_device *dev, 160 + void *data, struct drm_file *file_priv) 161 + { 162 + return -ENODEV; 163 + } 164 + static inline int exynos_drm_ipp_get_limits_ioctl(struct drm_device *dev, 165 + void *data, struct drm_file *file_priv) 166 + { 167 + return 
-ENODEV; 168 + } 169 + static inline int exynos_drm_ipp_commit_ioctl(struct drm_device *dev, 170 + void *data, struct drm_file *file_priv) 171 + { 172 + return -ENODEV; 173 + } 174 + #endif 175 + #endif
+195 -577
drivers/gpu/drm/exynos/exynos_drm_rotator.c
··· 10 10 */ 11 11 12 12 #include <linux/kernel.h> 13 + #include <linux/component.h> 13 14 #include <linux/err.h> 14 15 #include <linux/interrupt.h> 15 16 #include <linux/io.h> ··· 23 22 #include <drm/exynos_drm.h> 24 23 #include "regs-rotator.h" 25 24 #include "exynos_drm_drv.h" 25 + #include "exynos_drm_iommu.h" 26 26 #include "exynos_drm_ipp.h" 27 27 28 28 /* 29 29 * Rotator supports image crop/rotator and input/output DMA operations. 30 30 * input DMA reads image data from the memory. 31 31 * output DMA writes image data to memory. 32 - * 33 - * M2M operation : supports crop/scale/rotation/csc so on. 34 - * Memory ----> Rotator H/W ----> Memory. 35 32 */ 36 33 37 - /* 38 - * TODO 39 - * 1. check suspend/resume api if needed. 40 - * 2. need to check use case platform_device_id. 41 - * 3. check src/dst size with, height. 42 - * 4. need to add supported list in prop_list. 43 - */ 34 + #define ROTATOR_AUTOSUSPEND_DELAY 2000 44 35 45 - #define get_rot_context(dev) platform_get_drvdata(to_platform_device(dev)) 46 - #define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\ 47 - struct rot_context, ippdrv); 48 - #define rot_read(offset) readl(rot->regs + (offset)) 36 + #define rot_read(offset) readl(rot->regs + (offset)) 49 37 #define rot_write(cfg, offset) writel(cfg, rot->regs + (offset)) 50 38 51 39 enum rot_irq_status { ··· 42 52 ROT_IRQ_STATUS_ILLEGAL = 9, 43 53 }; 44 54 45 - /* 46 - * A structure of limitation. 47 - * 48 - * @min_w: minimum width. 49 - * @min_h: minimum height. 50 - * @max_w: maximum width. 51 - * @max_h: maximum height. 52 - * @align: align size. 53 - */ 54 - struct rot_limit { 55 - u32 min_w; 56 - u32 min_h; 57 - u32 max_w; 58 - u32 max_h; 59 - u32 align; 60 - }; 61 - 62 - /* 63 - * A structure of limitation table. 64 - * 65 - * @ycbcr420_2p: case of YUV. 66 - * @rgb888: case of RGB. 
67 - */ 68 - struct rot_limit_table { 69 - struct rot_limit ycbcr420_2p; 70 - struct rot_limit rgb888; 55 + struct rot_variant { 56 + const struct exynos_drm_ipp_formats *formats; 57 + unsigned int num_formats; 71 58 }; 72 59 73 60 /* 74 61 * A structure of rotator context. 75 62 * @ippdrv: prepare initialization using ippdrv. 76 - * @regs_res: register resources. 77 63 * @regs: memory mapped io registers. 78 64 * @clock: rotator gate clock. 79 65 * @limit_tbl: limitation of rotator. 80 66 * @irq: irq number. 81 - * @cur_buf_id: current operation buffer id. 82 - * @suspended: suspended state. 83 67 */ 84 68 struct rot_context { 85 - struct exynos_drm_ippdrv ippdrv; 86 - struct resource *regs_res; 69 + struct exynos_drm_ipp ipp; 70 + struct drm_device *drm_dev; 71 + struct device *dev; 87 72 void __iomem *regs; 88 73 struct clk *clock; 89 - struct rot_limit_table *limit_tbl; 90 - int irq; 91 - int cur_buf_id[EXYNOS_DRM_OPS_MAX]; 92 - bool suspended; 74 + const struct exynos_drm_ipp_formats *formats; 75 + unsigned int num_formats; 76 + struct exynos_drm_ipp_task *task; 93 77 }; 94 78 95 79 static void rotator_reg_set_irq(struct rot_context *rot, bool enable) ··· 76 112 val &= ~ROT_CONFIG_IRQ; 77 113 78 114 rot_write(val, ROT_CONFIG); 79 - } 80 - 81 - static u32 rotator_reg_get_fmt(struct rot_context *rot) 82 - { 83 - u32 val = rot_read(ROT_CONTROL); 84 - 85 - val &= ROT_CONTROL_FMT_MASK; 86 - 87 - return val; 88 115 } 89 116 90 117 static enum rot_irq_status rotator_reg_get_irq_status(struct rot_context *rot) ··· 93 138 static irqreturn_t rotator_irq_handler(int irq, void *arg) 94 139 { 95 140 struct rot_context *rot = arg; 96 - struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv; 97 - struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node; 98 - struct drm_exynos_ipp_event_work *event_work = c_node->event_work; 99 141 enum rot_irq_status irq_status; 100 142 u32 val; 101 143 ··· 104 152 val |= ROT_STATUS_IRQ_PENDING((u32)irq_status); 105 153 rot_write(val, ROT_STATUS); 
106 154 107 - if (irq_status == ROT_IRQ_STATUS_COMPLETE) { 108 - event_work->ippdrv = ippdrv; 109 - event_work->buf_id[EXYNOS_DRM_OPS_DST] = 110 - rot->cur_buf_id[EXYNOS_DRM_OPS_DST]; 111 - queue_work(ippdrv->event_workq, &event_work->work); 112 - } else { 113 - DRM_ERROR("the SFR is set illegally\n"); 155 + if (rot->task) { 156 + struct exynos_drm_ipp_task *task = rot->task; 157 + 158 + rot->task = NULL; 159 + pm_runtime_mark_last_busy(rot->dev); 160 + pm_runtime_put_autosuspend(rot->dev); 161 + exynos_drm_ipp_task_done(task, 162 + irq_status == ROT_IRQ_STATUS_COMPLETE ? 0 : -EINVAL); 114 163 } 115 164 116 165 return IRQ_HANDLED; 117 166 } 118 167 119 - static void rotator_align_size(struct rot_context *rot, u32 fmt, u32 *hsize, 120 - u32 *vsize) 168 + static void rotator_src_set_fmt(struct rot_context *rot, u32 fmt) 121 169 { 122 - struct rot_limit_table *limit_tbl = rot->limit_tbl; 123 - struct rot_limit *limit; 124 - u32 mask, val; 125 - 126 - /* Get size limit */ 127 - if (fmt == ROT_CONTROL_FMT_RGB888) 128 - limit = &limit_tbl->rgb888; 129 - else 130 - limit = &limit_tbl->ycbcr420_2p; 131 - 132 - /* Get mask for rounding to nearest aligned val */ 133 - mask = ~((1 << limit->align) - 1); 134 - 135 - /* Set aligned width */ 136 - val = ROT_ALIGN(*hsize, limit->align, mask); 137 - if (val < limit->min_w) 138 - *hsize = ROT_MIN(limit->min_w, mask); 139 - else if (val > limit->max_w) 140 - *hsize = ROT_MAX(limit->max_w, mask); 141 - else 142 - *hsize = val; 143 - 144 - /* Set aligned height */ 145 - val = ROT_ALIGN(*vsize, limit->align, mask); 146 - if (val < limit->min_h) 147 - *vsize = ROT_MIN(limit->min_h, mask); 148 - else if (val > limit->max_h) 149 - *vsize = ROT_MAX(limit->max_h, mask); 150 - else 151 - *vsize = val; 152 - } 153 - 154 - static int rotator_src_set_fmt(struct device *dev, u32 fmt) 155 - { 156 - struct rot_context *rot = dev_get_drvdata(dev); 157 170 u32 val; 158 171 159 172 val = rot_read(ROT_CONTROL); ··· 131 214 case DRM_FORMAT_XRGB8888: 
132 215 val |= ROT_CONTROL_FMT_RGB888; 133 216 break; 134 - default: 135 - DRM_ERROR("invalid image format\n"); 136 - return -EINVAL; 137 217 } 138 218 139 219 rot_write(val, ROT_CONTROL); 140 - 141 - return 0; 142 220 } 143 221 144 - static inline bool rotator_check_reg_fmt(u32 fmt) 222 + static void rotator_src_set_buf(struct rot_context *rot, 223 + struct exynos_drm_ipp_buffer *buf) 145 224 { 146 - if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) || 147 - (fmt == ROT_CONTROL_FMT_RGB888)) 148 - return true; 149 - 150 - return false; 151 - } 152 - 153 - static int rotator_src_set_size(struct device *dev, int swap, 154 - struct drm_exynos_pos *pos, 155 - struct drm_exynos_sz *sz) 156 - { 157 - struct rot_context *rot = dev_get_drvdata(dev); 158 - u32 fmt, hsize, vsize; 159 225 u32 val; 160 226 161 - /* Get format */ 162 - fmt = rotator_reg_get_fmt(rot); 163 - if (!rotator_check_reg_fmt(fmt)) { 164 - DRM_ERROR("invalid format.\n"); 165 - return -EINVAL; 166 - } 167 - 168 - /* Align buffer size */ 169 - hsize = sz->hsize; 170 - vsize = sz->vsize; 171 - rotator_align_size(rot, fmt, &hsize, &vsize); 172 - 173 227 /* Set buffer size configuration */ 174 - val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize); 228 + val = ROT_SET_BUF_SIZE_H(buf->buf.height) | 229 + ROT_SET_BUF_SIZE_W(buf->buf.pitch[0] / buf->format->cpp[0]); 175 230 rot_write(val, ROT_SRC_BUF_SIZE); 176 231 177 232 /* Set crop image position configuration */ 178 - val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x); 233 + val = ROT_CROP_POS_Y(buf->rect.y) | ROT_CROP_POS_X(buf->rect.x); 179 234 rot_write(val, ROT_SRC_CROP_POS); 180 - val = ROT_SRC_CROP_SIZE_H(pos->h) | ROT_SRC_CROP_SIZE_W(pos->w); 235 + val = ROT_SRC_CROP_SIZE_H(buf->rect.h) | 236 + ROT_SRC_CROP_SIZE_W(buf->rect.w); 181 237 rot_write(val, ROT_SRC_CROP_SIZE); 182 238 183 - return 0; 239 + /* Set buffer DMA address */ 240 + rot_write(buf->dma_addr[0], ROT_SRC_BUF_ADDR(0)); 241 + rot_write(buf->dma_addr[1], ROT_SRC_BUF_ADDR(1)); 184 242 } 
185 243 186 - static int rotator_src_set_addr(struct device *dev, 187 - struct drm_exynos_ipp_buf_info *buf_info, 188 - u32 buf_id, enum drm_exynos_ipp_buf_type buf_type) 244 + static void rotator_dst_set_transf(struct rot_context *rot, 245 + unsigned int rotation) 189 246 { 190 - struct rot_context *rot = dev_get_drvdata(dev); 191 - dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX]; 192 - u32 val, fmt, hsize, vsize; 193 - int i; 194 - 195 - /* Set current buf_id */ 196 - rot->cur_buf_id[EXYNOS_DRM_OPS_SRC] = buf_id; 197 - 198 - switch (buf_type) { 199 - case IPP_BUF_ENQUEUE: 200 - /* Set address configuration */ 201 - for_each_ipp_planar(i) 202 - addr[i] = buf_info->base[i]; 203 - 204 - /* Get format */ 205 - fmt = rotator_reg_get_fmt(rot); 206 - if (!rotator_check_reg_fmt(fmt)) { 207 - DRM_ERROR("invalid format.\n"); 208 - return -EINVAL; 209 - } 210 - 211 - /* Re-set cb planar for NV12 format */ 212 - if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) && 213 - !addr[EXYNOS_DRM_PLANAR_CB]) { 214 - 215 - val = rot_read(ROT_SRC_BUF_SIZE); 216 - hsize = ROT_GET_BUF_SIZE_W(val); 217 - vsize = ROT_GET_BUF_SIZE_H(val); 218 - 219 - /* Set cb planar */ 220 - addr[EXYNOS_DRM_PLANAR_CB] = 221 - addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize; 222 - } 223 - 224 - for_each_ipp_planar(i) 225 - rot_write(addr[i], ROT_SRC_BUF_ADDR(i)); 226 - break; 227 - case IPP_BUF_DEQUEUE: 228 - for_each_ipp_planar(i) 229 - rot_write(0x0, ROT_SRC_BUF_ADDR(i)); 230 - break; 231 - default: 232 - /* Nothing to do */ 233 - break; 234 - } 235 - 236 - return 0; 237 - } 238 - 239 - static int rotator_dst_set_transf(struct device *dev, 240 - enum drm_exynos_degree degree, 241 - enum drm_exynos_flip flip, bool *swap) 242 - { 243 - struct rot_context *rot = dev_get_drvdata(dev); 244 247 u32 val; 245 248 246 249 /* Set transform configuration */ 247 250 val = rot_read(ROT_CONTROL); 248 251 val &= ~ROT_CONTROL_FLIP_MASK; 249 252 250 - switch (flip) { 251 - case EXYNOS_DRM_FLIP_VERTICAL: 252 - val |= ROT_CONTROL_FLIP_VERTICAL; 253 
- break; 254 - case EXYNOS_DRM_FLIP_HORIZONTAL: 253 + if (rotation & DRM_MODE_REFLECT_X) 255 254 val |= ROT_CONTROL_FLIP_HORIZONTAL; 256 - break; 257 - default: 258 - /* Flip None */ 259 - break; 260 - } 255 + if (rotation & DRM_MODE_REFLECT_Y) 256 + val |= ROT_CONTROL_FLIP_VERTICAL; 261 257 262 258 val &= ~ROT_CONTROL_ROT_MASK; 263 259 264 - switch (degree) { 265 - case EXYNOS_DRM_DEGREE_90: 260 + if (rotation & DRM_MODE_ROTATE_90) 266 261 val |= ROT_CONTROL_ROT_90; 267 - break; 268 - case EXYNOS_DRM_DEGREE_180: 262 + else if (rotation & DRM_MODE_ROTATE_180) 269 263 val |= ROT_CONTROL_ROT_180; 270 - break; 271 - case EXYNOS_DRM_DEGREE_270: 264 + else if (rotation & DRM_MODE_ROTATE_270) 272 265 val |= ROT_CONTROL_ROT_270; 273 - break; 274 - default: 275 - /* Rotation 0 Degree */ 276 - break; 277 - } 278 266 279 267 rot_write(val, ROT_CONTROL); 280 - 281 - /* Check degree for setting buffer size swap */ 282 - if ((degree == EXYNOS_DRM_DEGREE_90) || 283 - (degree == EXYNOS_DRM_DEGREE_270)) 284 - *swap = true; 285 - else 286 - *swap = false; 287 - 288 - return 0; 289 268 } 290 269 291 - static int rotator_dst_set_size(struct device *dev, int swap, 292 - struct drm_exynos_pos *pos, 293 - struct drm_exynos_sz *sz) 270 + static void rotator_dst_set_buf(struct rot_context *rot, 271 + struct exynos_drm_ipp_buffer *buf) 294 272 { 295 - struct rot_context *rot = dev_get_drvdata(dev); 296 - u32 val, fmt, hsize, vsize; 297 - 298 - /* Get format */ 299 - fmt = rotator_reg_get_fmt(rot); 300 - if (!rotator_check_reg_fmt(fmt)) { 301 - DRM_ERROR("invalid format.\n"); 302 - return -EINVAL; 303 - } 304 - 305 - /* Align buffer size */ 306 - hsize = sz->hsize; 307 - vsize = sz->vsize; 308 - rotator_align_size(rot, fmt, &hsize, &vsize); 273 + u32 val; 309 274 310 275 /* Set buffer size configuration */ 311 - val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize); 276 + val = ROT_SET_BUF_SIZE_H(buf->buf.height) | 277 + ROT_SET_BUF_SIZE_W(buf->buf.pitch[0] / buf->format->cpp[0]); 312 
278 rot_write(val, ROT_DST_BUF_SIZE); 313 279 314 280 /* Set crop image position configuration */ 315 - val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x); 281 + val = ROT_CROP_POS_Y(buf->rect.y) | ROT_CROP_POS_X(buf->rect.x); 316 282 rot_write(val, ROT_DST_CROP_POS); 317 283 318 - return 0; 284 + /* Set buffer DMA address */ 285 + rot_write(buf->dma_addr[0], ROT_DST_BUF_ADDR(0)); 286 + rot_write(buf->dma_addr[1], ROT_DST_BUF_ADDR(1)); 319 287 } 320 288 321 - static int rotator_dst_set_addr(struct device *dev, 322 - struct drm_exynos_ipp_buf_info *buf_info, 323 - u32 buf_id, enum drm_exynos_ipp_buf_type buf_type) 289 + static void rotator_start(struct rot_context *rot) 324 290 { 325 - struct rot_context *rot = dev_get_drvdata(dev); 326 - dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX]; 327 - u32 val, fmt, hsize, vsize; 328 - int i; 329 - 330 - /* Set current buf_id */ 331 - rot->cur_buf_id[EXYNOS_DRM_OPS_DST] = buf_id; 332 - 333 - switch (buf_type) { 334 - case IPP_BUF_ENQUEUE: 335 - /* Set address configuration */ 336 - for_each_ipp_planar(i) 337 - addr[i] = buf_info->base[i]; 338 - 339 - /* Get format */ 340 - fmt = rotator_reg_get_fmt(rot); 341 - if (!rotator_check_reg_fmt(fmt)) { 342 - DRM_ERROR("invalid format.\n"); 343 - return -EINVAL; 344 - } 345 - 346 - /* Re-set cb planar for NV12 format */ 347 - if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) && 348 - !addr[EXYNOS_DRM_PLANAR_CB]) { 349 - /* Get buf size */ 350 - val = rot_read(ROT_DST_BUF_SIZE); 351 - 352 - hsize = ROT_GET_BUF_SIZE_W(val); 353 - vsize = ROT_GET_BUF_SIZE_H(val); 354 - 355 - /* Set cb planar */ 356 - addr[EXYNOS_DRM_PLANAR_CB] = 357 - addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize; 358 - } 359 - 360 - for_each_ipp_planar(i) 361 - rot_write(addr[i], ROT_DST_BUF_ADDR(i)); 362 - break; 363 - case IPP_BUF_DEQUEUE: 364 - for_each_ipp_planar(i) 365 - rot_write(0x0, ROT_DST_BUF_ADDR(i)); 366 - break; 367 - default: 368 - /* Nothing to do */ 369 - break; 370 - } 371 - 372 - return 0; 373 - } 374 - 375 - static 
struct exynos_drm_ipp_ops rot_src_ops = { 376 - .set_fmt = rotator_src_set_fmt, 377 - .set_size = rotator_src_set_size, 378 - .set_addr = rotator_src_set_addr, 379 - }; 380 - 381 - static struct exynos_drm_ipp_ops rot_dst_ops = { 382 - .set_transf = rotator_dst_set_transf, 383 - .set_size = rotator_dst_set_size, 384 - .set_addr = rotator_dst_set_addr, 385 - }; 386 - 387 - static int rotator_init_prop_list(struct exynos_drm_ippdrv *ippdrv) 388 - { 389 - struct drm_exynos_ipp_prop_list *prop_list = &ippdrv->prop_list; 390 - 391 - prop_list->version = 1; 392 - prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) | 393 - (1 << EXYNOS_DRM_FLIP_HORIZONTAL); 394 - prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) | 395 - (1 << EXYNOS_DRM_DEGREE_90) | 396 - (1 << EXYNOS_DRM_DEGREE_180) | 397 - (1 << EXYNOS_DRM_DEGREE_270); 398 - prop_list->csc = 0; 399 - prop_list->crop = 0; 400 - prop_list->scale = 0; 401 - 402 - return 0; 403 - } 404 - 405 - static inline bool rotator_check_drm_fmt(u32 fmt) 406 - { 407 - switch (fmt) { 408 - case DRM_FORMAT_XRGB8888: 409 - case DRM_FORMAT_NV12: 410 - return true; 411 - default: 412 - DRM_DEBUG_KMS("not support format\n"); 413 - return false; 414 - } 415 - } 416 - 417 - static inline bool rotator_check_drm_flip(enum drm_exynos_flip flip) 418 - { 419 - switch (flip) { 420 - case EXYNOS_DRM_FLIP_NONE: 421 - case EXYNOS_DRM_FLIP_VERTICAL: 422 - case EXYNOS_DRM_FLIP_HORIZONTAL: 423 - case EXYNOS_DRM_FLIP_BOTH: 424 - return true; 425 - default: 426 - DRM_DEBUG_KMS("invalid flip\n"); 427 - return false; 428 - } 429 - } 430 - 431 - static int rotator_ippdrv_check_property(struct device *dev, 432 - struct drm_exynos_ipp_property *property) 433 - { 434 - struct drm_exynos_ipp_config *src_config = 435 - &property->config[EXYNOS_DRM_OPS_SRC]; 436 - struct drm_exynos_ipp_config *dst_config = 437 - &property->config[EXYNOS_DRM_OPS_DST]; 438 - struct drm_exynos_pos *src_pos = &src_config->pos; 439 - struct drm_exynos_pos *dst_pos = &dst_config->pos; 440 - 
struct drm_exynos_sz *src_sz = &src_config->sz; 441 - struct drm_exynos_sz *dst_sz = &dst_config->sz; 442 - bool swap = false; 443 - 444 - /* Check format configuration */ 445 - if (src_config->fmt != dst_config->fmt) { 446 - DRM_DEBUG_KMS("not support csc feature\n"); 447 - return -EINVAL; 448 - } 449 - 450 - if (!rotator_check_drm_fmt(dst_config->fmt)) { 451 - DRM_DEBUG_KMS("invalid format\n"); 452 - return -EINVAL; 453 - } 454 - 455 - /* Check transform configuration */ 456 - if (src_config->degree != EXYNOS_DRM_DEGREE_0) { 457 - DRM_DEBUG_KMS("not support source-side rotation\n"); 458 - return -EINVAL; 459 - } 460 - 461 - switch (dst_config->degree) { 462 - case EXYNOS_DRM_DEGREE_90: 463 - case EXYNOS_DRM_DEGREE_270: 464 - swap = true; 465 - case EXYNOS_DRM_DEGREE_0: 466 - case EXYNOS_DRM_DEGREE_180: 467 - /* No problem */ 468 - break; 469 - default: 470 - DRM_DEBUG_KMS("invalid degree\n"); 471 - return -EINVAL; 472 - } 473 - 474 - if (src_config->flip != EXYNOS_DRM_FLIP_NONE) { 475 - DRM_DEBUG_KMS("not support source-side flip\n"); 476 - return -EINVAL; 477 - } 478 - 479 - if (!rotator_check_drm_flip(dst_config->flip)) { 480 - DRM_DEBUG_KMS("invalid flip\n"); 481 - return -EINVAL; 482 - } 483 - 484 - /* Check size configuration */ 485 - if ((src_pos->x + src_pos->w > src_sz->hsize) || 486 - (src_pos->y + src_pos->h > src_sz->vsize)) { 487 - DRM_DEBUG_KMS("out of source buffer bound\n"); 488 - return -EINVAL; 489 - } 490 - 491 - if (swap) { 492 - if ((dst_pos->x + dst_pos->h > dst_sz->vsize) || 493 - (dst_pos->y + dst_pos->w > dst_sz->hsize)) { 494 - DRM_DEBUG_KMS("out of destination buffer bound\n"); 495 - return -EINVAL; 496 - } 497 - 498 - if ((src_pos->w != dst_pos->h) || (src_pos->h != dst_pos->w)) { 499 - DRM_DEBUG_KMS("not support scale feature\n"); 500 - return -EINVAL; 501 - } 502 - } else { 503 - if ((dst_pos->x + dst_pos->w > dst_sz->hsize) || 504 - (dst_pos->y + dst_pos->h > dst_sz->vsize)) { 505 - DRM_DEBUG_KMS("out of destination buffer bound\n"); 
506 - return -EINVAL; 507 - } 508 - 509 - if ((src_pos->w != dst_pos->w) || (src_pos->h != dst_pos->h)) { 510 - DRM_DEBUG_KMS("not support scale feature\n"); 511 - return -EINVAL; 512 - } 513 - } 514 - 515 - return 0; 516 - } 517 - 518 - static int rotator_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd) 519 - { 520 - struct rot_context *rot = dev_get_drvdata(dev); 521 291 u32 val; 522 - 523 - if (rot->suspended) { 524 - DRM_ERROR("suspended state\n"); 525 - return -EPERM; 526 - } 527 - 528 - if (cmd != IPP_CMD_M2M) { 529 - DRM_ERROR("not support cmd: %d\n", cmd); 530 - return -EINVAL; 531 - } 532 292 533 293 /* Set interrupt enable */ 534 294 rotator_reg_set_irq(rot, true); 535 295 536 296 val = rot_read(ROT_CONTROL); 537 297 val |= ROT_CONTROL_START; 538 - 539 298 rot_write(val, ROT_CONTROL); 299 + } 300 + 301 + static int rotator_commit(struct exynos_drm_ipp *ipp, 302 + struct exynos_drm_ipp_task *task) 303 + { 304 + struct rot_context *rot = 305 + container_of(ipp, struct rot_context, ipp); 306 + 307 + pm_runtime_get_sync(rot->dev); 308 + rot->task = task; 309 + 310 + rotator_src_set_fmt(rot, task->src.buf.fourcc); 311 + rotator_src_set_buf(rot, &task->src); 312 + rotator_dst_set_transf(rot, task->transform.rotation); 313 + rotator_dst_set_buf(rot, &task->dst); 314 + rotator_start(rot); 540 315 541 316 return 0; 542 317 } 543 318 544 - static struct rot_limit_table rot_limit_tbl_4210 = { 545 - .ycbcr420_2p = { 546 - .min_w = 32, 547 - .min_h = 32, 548 - .max_w = SZ_64K, 549 - .max_h = SZ_64K, 550 - .align = 3, 551 - }, 552 - .rgb888 = { 553 - .min_w = 8, 554 - .min_h = 8, 555 - .max_w = SZ_16K, 556 - .max_h = SZ_16K, 557 - .align = 2, 558 - }, 319 + static const struct exynos_drm_ipp_funcs ipp_funcs = { 320 + .commit = rotator_commit, 559 321 }; 560 322 561 - static struct rot_limit_table rot_limit_tbl_4x12 = { 562 - .ycbcr420_2p = { 563 - .min_w = 32, 564 - .min_h = 32, 565 - .max_w = SZ_32K, 566 - .max_h = SZ_32K, 567 - .align = 3, 568 - }, 569 - 
.rgb888 = { 570 - .min_w = 8, 571 - .min_h = 8, 572 - .max_w = SZ_8K, 573 - .max_h = SZ_8K, 574 - .align = 2, 575 - }, 576 - }; 323 + static int rotator_bind(struct device *dev, struct device *master, void *data) 324 + { 325 + struct rot_context *rot = dev_get_drvdata(dev); 326 + struct drm_device *drm_dev = data; 327 + struct exynos_drm_ipp *ipp = &rot->ipp; 577 328 578 - static struct rot_limit_table rot_limit_tbl_5250 = { 579 - .ycbcr420_2p = { 580 - .min_w = 32, 581 - .min_h = 32, 582 - .max_w = SZ_32K, 583 - .max_h = SZ_32K, 584 - .align = 3, 585 - }, 586 - .rgb888 = { 587 - .min_w = 8, 588 - .min_h = 8, 589 - .max_w = SZ_8K, 590 - .max_h = SZ_8K, 591 - .align = 1, 592 - }, 593 - }; 329 + rot->drm_dev = drm_dev; 330 + drm_iommu_attach_device(drm_dev, dev); 594 331 595 - static const struct of_device_id exynos_rotator_match[] = { 596 - { 597 - .compatible = "samsung,exynos4210-rotator", 598 - .data = &rot_limit_tbl_4210, 599 - }, 600 - { 601 - .compatible = "samsung,exynos4212-rotator", 602 - .data = &rot_limit_tbl_4x12, 603 - }, 604 - { 605 - .compatible = "samsung,exynos5250-rotator", 606 - .data = &rot_limit_tbl_5250, 607 - }, 608 - {}, 332 + exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs, 333 + DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE, 334 + rot->formats, rot->num_formats, "rotator"); 335 + 336 + dev_info(dev, "The exynos rotator has been probed successfully\n"); 337 + 338 + return 0; 339 + } 340 + 341 + static void rotator_unbind(struct device *dev, struct device *master, 342 + void *data) 343 + { 344 + struct rot_context *rot = dev_get_drvdata(dev); 345 + struct drm_device *drm_dev = data; 346 + struct exynos_drm_ipp *ipp = &rot->ipp; 347 + 348 + exynos_drm_ipp_unregister(drm_dev, ipp); 349 + drm_iommu_detach_device(rot->drm_dev, rot->dev); 350 + } 351 + 352 + static const struct component_ops rotator_component_ops = { 353 + .bind = rotator_bind, 354 + .unbind = rotator_unbind, 609 355 }; 610 - MODULE_DEVICE_TABLE(of, exynos_rotator_match); 
611 356 612 357 static int rotator_probe(struct platform_device *pdev) 613 358 { 614 359 struct device *dev = &pdev->dev; 360 + struct resource *regs_res; 615 361 struct rot_context *rot; 616 - struct exynos_drm_ippdrv *ippdrv; 362 + const struct rot_variant *variant; 363 + int irq; 617 364 int ret; 618 - 619 - if (!dev->of_node) { 620 - dev_err(dev, "cannot find of_node.\n"); 621 - return -ENODEV; 622 - } 623 365 624 366 rot = devm_kzalloc(dev, sizeof(*rot), GFP_KERNEL); 625 367 if (!rot) 626 368 return -ENOMEM; 627 369 628 - rot->limit_tbl = (struct rot_limit_table *) 629 - of_device_get_match_data(dev); 630 - rot->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 631 - rot->regs = devm_ioremap_resource(dev, rot->regs_res); 370 + variant = of_device_get_match_data(dev); 371 + rot->formats = variant->formats; 372 + rot->num_formats = variant->num_formats; 373 + rot->dev = dev; 374 + regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 375 + rot->regs = devm_ioremap_resource(dev, regs_res); 632 376 if (IS_ERR(rot->regs)) 633 377 return PTR_ERR(rot->regs); 634 378 635 - rot->irq = platform_get_irq(pdev, 0); 636 - if (rot->irq < 0) { 379 + irq = platform_get_irq(pdev, 0); 380 + if (irq < 0) { 637 381 dev_err(dev, "failed to get irq\n"); 638 - return rot->irq; 382 + return irq; 639 383 } 640 384 641 - ret = devm_request_threaded_irq(dev, rot->irq, NULL, 642 - rotator_irq_handler, IRQF_ONESHOT, "drm_rotator", rot); 385 + ret = devm_request_irq(dev, irq, rotator_irq_handler, 0, dev_name(dev), 386 + rot); 643 387 if (ret < 0) { 644 388 dev_err(dev, "failed to request irq\n"); 645 389 return ret; ··· 312 734 return PTR_ERR(rot->clock); 313 735 } 314 736 737 + pm_runtime_use_autosuspend(dev); 738 + pm_runtime_set_autosuspend_delay(dev, ROTATOR_AUTOSUSPEND_DELAY); 315 739 pm_runtime_enable(dev); 316 - 317 - ippdrv = &rot->ippdrv; 318 - ippdrv->dev = dev; 319 - ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &rot_src_ops; 320 - ippdrv->ops[EXYNOS_DRM_OPS_DST] = 
&rot_dst_ops; 321 - ippdrv->check_property = rotator_ippdrv_check_property; 322 - ippdrv->start = rotator_ippdrv_start; 323 - ret = rotator_init_prop_list(ippdrv); 324 - if (ret < 0) { 325 - dev_err(dev, "failed to init property list.\n"); 326 - goto err_ippdrv_register; 327 - } 328 - 329 - DRM_DEBUG_KMS("ippdrv[%pK]\n", ippdrv); 330 - 331 740 platform_set_drvdata(pdev, rot); 332 741 333 - ret = exynos_drm_ippdrv_register(ippdrv); 334 - if (ret < 0) { 335 - dev_err(dev, "failed to register drm rotator device\n"); 336 - goto err_ippdrv_register; 337 - } 338 - 339 - dev_info(dev, "The exynos rotator is probed successfully\n"); 742 + ret = component_add(dev, &rotator_component_ops); 743 + if (ret) 744 + goto err_component; 340 745 341 746 return 0; 342 747 343 - err_ippdrv_register: 748 + err_component: 749 + pm_runtime_dont_use_autosuspend(dev); 344 750 pm_runtime_disable(dev); 345 751 return ret; 346 752 } ··· 332 770 static int rotator_remove(struct platform_device *pdev) 333 771 { 334 772 struct device *dev = &pdev->dev; 335 - struct rot_context *rot = dev_get_drvdata(dev); 336 - struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv; 337 773 338 - exynos_drm_ippdrv_unregister(ippdrv); 339 - 774 + component_del(dev, &rotator_component_ops); 775 + pm_runtime_dont_use_autosuspend(dev); 340 776 pm_runtime_disable(dev); 341 777 342 778 return 0; 343 779 } 344 780 345 781 #ifdef CONFIG_PM 346 - static int rotator_clk_crtl(struct rot_context *rot, bool enable) 347 - { 348 - if (enable) { 349 - clk_prepare_enable(rot->clock); 350 - rot->suspended = false; 351 - } else { 352 - clk_disable_unprepare(rot->clock); 353 - rot->suspended = true; 354 - } 355 - 356 - return 0; 357 - } 358 - 359 782 static int rotator_runtime_suspend(struct device *dev) 360 783 { 361 784 struct rot_context *rot = dev_get_drvdata(dev); 362 785 363 - return rotator_clk_crtl(rot, false); 786 + clk_disable_unprepare(rot->clock); 787 + return 0; 364 788 } 365 789 366 790 static int 
rotator_runtime_resume(struct device *dev) 367 791 { 368 792 struct rot_context *rot = dev_get_drvdata(dev); 369 793 370 - return rotator_clk_crtl(rot, true); 794 + return clk_prepare_enable(rot->clock); 371 795 } 372 796 #endif 797 + 798 + static const struct drm_exynos_ipp_limit rotator_4210_rbg888_limits[] = { 799 + { IPP_SIZE_LIMIT(BUFFER, .h = { 8, SZ_16K }, .v = { 8, SZ_16K }) }, 800 + { IPP_SIZE_LIMIT(AREA, .h.align = 4, .v.align = 4) }, 801 + }; 802 + 803 + static const struct drm_exynos_ipp_limit rotator_4412_rbg888_limits[] = { 804 + { IPP_SIZE_LIMIT(BUFFER, .h = { 8, SZ_8K }, .v = { 8, SZ_8K }) }, 805 + { IPP_SIZE_LIMIT(AREA, .h.align = 4, .v.align = 4) }, 806 + }; 807 + 808 + static const struct drm_exynos_ipp_limit rotator_5250_rbg888_limits[] = { 809 + { IPP_SIZE_LIMIT(BUFFER, .h = { 8, SZ_8K }, .v = { 8, SZ_8K }) }, 810 + { IPP_SIZE_LIMIT(AREA, .h.align = 2, .v.align = 2) }, 811 + }; 812 + 813 + static const struct drm_exynos_ipp_limit rotator_4210_yuv_limits[] = { 814 + { IPP_SIZE_LIMIT(BUFFER, .h = { 32, SZ_64K }, .v = { 32, SZ_64K }) }, 815 + { IPP_SIZE_LIMIT(AREA, .h.align = 8, .v.align = 8) }, 816 + }; 817 + 818 + static const struct drm_exynos_ipp_limit rotator_4412_yuv_limits[] = { 819 + { IPP_SIZE_LIMIT(BUFFER, .h = { 32, SZ_32K }, .v = { 32, SZ_32K }) }, 820 + { IPP_SIZE_LIMIT(AREA, .h.align = 8, .v.align = 8) }, 821 + }; 822 + 823 + static const struct exynos_drm_ipp_formats rotator_4210_formats[] = { 824 + { IPP_SRCDST_FORMAT(XRGB8888, rotator_4210_rbg888_limits) }, 825 + { IPP_SRCDST_FORMAT(NV12, rotator_4210_yuv_limits) }, 826 + }; 827 + 828 + static const struct exynos_drm_ipp_formats rotator_4412_formats[] = { 829 + { IPP_SRCDST_FORMAT(XRGB8888, rotator_4412_rbg888_limits) }, 830 + { IPP_SRCDST_FORMAT(NV12, rotator_4412_yuv_limits) }, 831 + }; 832 + 833 + static const struct exynos_drm_ipp_formats rotator_5250_formats[] = { 834 + { IPP_SRCDST_FORMAT(XRGB8888, rotator_5250_rbg888_limits) }, 835 + { IPP_SRCDST_FORMAT(NV12, 
rotator_4412_yuv_limits) }, 836 + }; 837 + 838 + static const struct rot_variant rotator_4210_data = { 839 + .formats = rotator_4210_formats, 840 + .num_formats = ARRAY_SIZE(rotator_4210_formats), 841 + }; 842 + 843 + static const struct rot_variant rotator_4412_data = { 844 + .formats = rotator_4412_formats, 845 + .num_formats = ARRAY_SIZE(rotator_4412_formats), 846 + }; 847 + 848 + static const struct rot_variant rotator_5250_data = { 849 + .formats = rotator_5250_formats, 850 + .num_formats = ARRAY_SIZE(rotator_5250_formats), 851 + }; 852 + 853 + static const struct of_device_id exynos_rotator_match[] = { 854 + { 855 + .compatible = "samsung,exynos4210-rotator", 856 + .data = &rotator_4210_data, 857 + }, { 858 + .compatible = "samsung,exynos4212-rotator", 859 + .data = &rotator_4412_data, 860 + }, { 861 + .compatible = "samsung,exynos5250-rotator", 862 + .data = &rotator_5250_data, 863 + }, { 864 + }, 865 + }; 866 + MODULE_DEVICE_TABLE(of, exynos_rotator_match); 373 867 374 868 static const struct dev_pm_ops rotator_pm_ops = { 375 869 SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, ··· 438 820 .probe = rotator_probe, 439 821 .remove = rotator_remove, 440 822 .driver = { 441 - .name = "exynos-rot", 823 + .name = "exynos-rotator", 442 824 .owner = THIS_MODULE, 443 825 .pm = &rotator_pm_ops, 444 826 .of_match_table = exynos_rotator_match,
+694
drivers/gpu/drm/exynos/exynos_drm_scaler.c
··· 1 + /* 2 + * Copyright (C) 2017 Samsung Electronics Co.Ltd 3 + * Author: 4 + * Andrzej Pietrasiewicz <andrzej.p@samsung.com> 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 as 8 + * published by the Free Software Foundationr 9 + */ 10 + 11 + #include <linux/kernel.h> 12 + #include <linux/component.h> 13 + #include <linux/err.h> 14 + #include <linux/interrupt.h> 15 + #include <linux/io.h> 16 + #include <linux/platform_device.h> 17 + #include <linux/clk.h> 18 + #include <linux/of_device.h> 19 + #include <linux/pm_runtime.h> 20 + 21 + #include <drm/drmP.h> 22 + #include <drm/exynos_drm.h> 23 + #include "regs-scaler.h" 24 + #include "exynos_drm_fb.h" 25 + #include "exynos_drm_drv.h" 26 + #include "exynos_drm_iommu.h" 27 + #include "exynos_drm_ipp.h" 28 + 29 + #define scaler_read(offset) readl(scaler->regs + (offset)) 30 + #define scaler_write(cfg, offset) writel(cfg, scaler->regs + (offset)) 31 + #define SCALER_MAX_CLK 4 32 + #define SCALER_AUTOSUSPEND_DELAY 2000 33 + 34 + struct scaler_data { 35 + const char *clk_name[SCALER_MAX_CLK]; 36 + unsigned int num_clk; 37 + const struct exynos_drm_ipp_formats *formats; 38 + unsigned int num_formats; 39 + }; 40 + 41 + struct scaler_context { 42 + struct exynos_drm_ipp ipp; 43 + struct drm_device *drm_dev; 44 + struct device *dev; 45 + void __iomem *regs; 46 + struct clk *clock[SCALER_MAX_CLK]; 47 + struct exynos_drm_ipp_task *task; 48 + const struct scaler_data *scaler_data; 49 + }; 50 + 51 + static u32 scaler_get_format(u32 drm_fmt) 52 + { 53 + switch (drm_fmt) { 54 + case DRM_FORMAT_NV21: 55 + return SCALER_YUV420_2P_UV; 56 + case DRM_FORMAT_NV12: 57 + return SCALER_YUV420_2P_VU; 58 + case DRM_FORMAT_YUV420: 59 + return SCALER_YUV420_3P; 60 + case DRM_FORMAT_YUYV: 61 + return SCALER_YUV422_1P_YUYV; 62 + case DRM_FORMAT_UYVY: 63 + return SCALER_YUV422_1P_UYVY; 64 + case DRM_FORMAT_YVYU: 65 + return SCALER_YUV422_1P_YVYU; 
66 + case DRM_FORMAT_NV61: 67 + return SCALER_YUV422_2P_UV; 68 + case DRM_FORMAT_NV16: 69 + return SCALER_YUV422_2P_VU; 70 + case DRM_FORMAT_YUV422: 71 + return SCALER_YUV422_3P; 72 + case DRM_FORMAT_NV42: 73 + return SCALER_YUV444_2P_UV; 74 + case DRM_FORMAT_NV24: 75 + return SCALER_YUV444_2P_VU; 76 + case DRM_FORMAT_YUV444: 77 + return SCALER_YUV444_3P; 78 + case DRM_FORMAT_RGB565: 79 + return SCALER_RGB_565; 80 + case DRM_FORMAT_XRGB1555: 81 + return SCALER_ARGB1555; 82 + case DRM_FORMAT_ARGB1555: 83 + return SCALER_ARGB1555; 84 + case DRM_FORMAT_XRGB4444: 85 + return SCALER_ARGB4444; 86 + case DRM_FORMAT_ARGB4444: 87 + return SCALER_ARGB4444; 88 + case DRM_FORMAT_XRGB8888: 89 + return SCALER_ARGB8888; 90 + case DRM_FORMAT_ARGB8888: 91 + return SCALER_ARGB8888; 92 + case DRM_FORMAT_RGBX8888: 93 + return SCALER_RGBA8888; 94 + case DRM_FORMAT_RGBA8888: 95 + return SCALER_RGBA8888; 96 + default: 97 + break; 98 + } 99 + 100 + return 0; 101 + } 102 + 103 + static inline void scaler_enable_int(struct scaler_context *scaler) 104 + { 105 + u32 val; 106 + 107 + val = SCALER_INT_EN_TIMEOUT | 108 + SCALER_INT_EN_ILLEGAL_BLEND | 109 + SCALER_INT_EN_ILLEGAL_RATIO | 110 + SCALER_INT_EN_ILLEGAL_DST_HEIGHT | 111 + SCALER_INT_EN_ILLEGAL_DST_WIDTH | 112 + SCALER_INT_EN_ILLEGAL_DST_V_POS | 113 + SCALER_INT_EN_ILLEGAL_DST_H_POS | 114 + SCALER_INT_EN_ILLEGAL_DST_C_SPAN | 115 + SCALER_INT_EN_ILLEGAL_DST_Y_SPAN | 116 + SCALER_INT_EN_ILLEGAL_DST_CR_BASE | 117 + SCALER_INT_EN_ILLEGAL_DST_CB_BASE | 118 + SCALER_INT_EN_ILLEGAL_DST_Y_BASE | 119 + SCALER_INT_EN_ILLEGAL_DST_COLOR | 120 + SCALER_INT_EN_ILLEGAL_SRC_HEIGHT | 121 + SCALER_INT_EN_ILLEGAL_SRC_WIDTH | 122 + SCALER_INT_EN_ILLEGAL_SRC_CV_POS | 123 + SCALER_INT_EN_ILLEGAL_SRC_CH_POS | 124 + SCALER_INT_EN_ILLEGAL_SRC_YV_POS | 125 + SCALER_INT_EN_ILLEGAL_SRC_YH_POS | 126 + SCALER_INT_EN_ILLEGAL_DST_SPAN | 127 + SCALER_INT_EN_ILLEGAL_SRC_Y_SPAN | 128 + SCALER_INT_EN_ILLEGAL_SRC_CR_BASE | 129 + SCALER_INT_EN_ILLEGAL_SRC_CB_BASE | 130 + 
SCALER_INT_EN_ILLEGAL_SRC_Y_BASE | 131 + SCALER_INT_EN_ILLEGAL_SRC_COLOR | 132 + SCALER_INT_EN_FRAME_END; 133 + scaler_write(val, SCALER_INT_EN); 134 + } 135 + 136 + static inline void scaler_set_src_fmt(struct scaler_context *scaler, 137 + u32 src_fmt) 138 + { 139 + u32 val; 140 + 141 + val = SCALER_SRC_CFG_SET_COLOR_FORMAT(src_fmt); 142 + scaler_write(val, SCALER_SRC_CFG); 143 + } 144 + 145 + static inline void scaler_set_src_base(struct scaler_context *scaler, 146 + struct exynos_drm_ipp_buffer *src_buf) 147 + { 148 + static unsigned int bases[] = { 149 + SCALER_SRC_Y_BASE, 150 + SCALER_SRC_CB_BASE, 151 + SCALER_SRC_CR_BASE, 152 + }; 153 + int i; 154 + 155 + for (i = 0; i < src_buf->format->num_planes; ++i) 156 + scaler_write(src_buf->dma_addr[i], bases[i]); 157 + } 158 + 159 + static inline void scaler_set_src_span(struct scaler_context *scaler, 160 + struct exynos_drm_ipp_buffer *src_buf) 161 + { 162 + u32 val; 163 + 164 + val = SCALER_SRC_SPAN_SET_Y_SPAN(src_buf->buf.pitch[0] / 165 + src_buf->format->cpp[0]); 166 + 167 + if (src_buf->format->num_planes > 1) 168 + val |= SCALER_SRC_SPAN_SET_C_SPAN(src_buf->buf.pitch[1]); 169 + 170 + scaler_write(val, SCALER_SRC_SPAN); 171 + } 172 + 173 + static inline void scaler_set_src_luma_pos(struct scaler_context *scaler, 174 + struct drm_exynos_ipp_task_rect *src_pos) 175 + { 176 + u32 val; 177 + 178 + val = SCALER_SRC_Y_POS_SET_YH_POS(src_pos->x << 2); 179 + val |= SCALER_SRC_Y_POS_SET_YV_POS(src_pos->y << 2); 180 + scaler_write(val, SCALER_SRC_Y_POS); 181 + scaler_write(val, SCALER_SRC_C_POS); /* ATTENTION! 
*/ 182 + } 183 + 184 + static inline void scaler_set_src_wh(struct scaler_context *scaler, 185 + struct drm_exynos_ipp_task_rect *src_pos) 186 + { 187 + u32 val; 188 + 189 + val = SCALER_SRC_WH_SET_WIDTH(src_pos->w); 190 + val |= SCALER_SRC_WH_SET_HEIGHT(src_pos->h); 191 + scaler_write(val, SCALER_SRC_WH); 192 + } 193 + 194 + static inline void scaler_set_dst_fmt(struct scaler_context *scaler, 195 + u32 dst_fmt) 196 + { 197 + u32 val; 198 + 199 + val = SCALER_DST_CFG_SET_COLOR_FORMAT(dst_fmt); 200 + scaler_write(val, SCALER_DST_CFG); 201 + } 202 + 203 + static inline void scaler_set_dst_base(struct scaler_context *scaler, 204 + struct exynos_drm_ipp_buffer *dst_buf) 205 + { 206 + static unsigned int bases[] = { 207 + SCALER_DST_Y_BASE, 208 + SCALER_DST_CB_BASE, 209 + SCALER_DST_CR_BASE, 210 + }; 211 + int i; 212 + 213 + for (i = 0; i < dst_buf->format->num_planes; ++i) 214 + scaler_write(dst_buf->dma_addr[i], bases[i]); 215 + } 216 + 217 + static inline void scaler_set_dst_span(struct scaler_context *scaler, 218 + struct exynos_drm_ipp_buffer *dst_buf) 219 + { 220 + u32 val; 221 + 222 + val = SCALER_DST_SPAN_SET_Y_SPAN(dst_buf->buf.pitch[0] / 223 + dst_buf->format->cpp[0]); 224 + 225 + if (dst_buf->format->num_planes > 1) 226 + val |= SCALER_DST_SPAN_SET_C_SPAN(dst_buf->buf.pitch[1]); 227 + 228 + scaler_write(val, SCALER_DST_SPAN); 229 + } 230 + 231 + static inline void scaler_set_dst_luma_pos(struct scaler_context *scaler, 232 + struct drm_exynos_ipp_task_rect *dst_pos) 233 + { 234 + u32 val; 235 + 236 + val = SCALER_DST_WH_SET_WIDTH(dst_pos->w); 237 + val |= SCALER_DST_WH_SET_HEIGHT(dst_pos->h); 238 + scaler_write(val, SCALER_DST_WH); 239 + } 240 + 241 + static inline void scaler_set_dst_wh(struct scaler_context *scaler, 242 + struct drm_exynos_ipp_task_rect *dst_pos) 243 + { 244 + u32 val; 245 + 246 + val = SCALER_DST_POS_SET_H_POS(dst_pos->x); 247 + val |= SCALER_DST_POS_SET_V_POS(dst_pos->y); 248 + scaler_write(val, SCALER_DST_POS); 249 + } 250 + 251 + static 
inline void scaler_set_hv_ratio(struct scaler_context *scaler, 252 + unsigned int rotation, 253 + struct drm_exynos_ipp_task_rect *src_pos, 254 + struct drm_exynos_ipp_task_rect *dst_pos) 255 + { 256 + u32 val, h_ratio, v_ratio; 257 + 258 + if (drm_rotation_90_or_270(rotation)) { 259 + h_ratio = (src_pos->h << 16) / dst_pos->w; 260 + v_ratio = (src_pos->w << 16) / dst_pos->h; 261 + } else { 262 + h_ratio = (src_pos->w << 16) / dst_pos->w; 263 + v_ratio = (src_pos->h << 16) / dst_pos->h; 264 + } 265 + 266 + val = SCALER_H_RATIO_SET(h_ratio); 267 + scaler_write(val, SCALER_H_RATIO); 268 + 269 + val = SCALER_V_RATIO_SET(v_ratio); 270 + scaler_write(val, SCALER_V_RATIO); 271 + } 272 + 273 + static inline void scaler_set_rotation(struct scaler_context *scaler, 274 + unsigned int rotation) 275 + { 276 + u32 val = 0; 277 + 278 + if (rotation & DRM_MODE_ROTATE_90) 279 + val |= SCALER_ROT_CFG_SET_ROTMODE(SCALER_ROT_MODE_90); 280 + else if (rotation & DRM_MODE_ROTATE_180) 281 + val |= SCALER_ROT_CFG_SET_ROTMODE(SCALER_ROT_MODE_180); 282 + else if (rotation & DRM_MODE_ROTATE_270) 283 + val |= SCALER_ROT_CFG_SET_ROTMODE(SCALER_ROT_MODE_270); 284 + if (rotation & DRM_MODE_REFLECT_X) 285 + val |= SCALER_ROT_CFG_FLIP_X_EN; 286 + if (rotation & DRM_MODE_REFLECT_Y) 287 + val |= SCALER_ROT_CFG_FLIP_Y_EN; 288 + scaler_write(val, SCALER_ROT_CFG); 289 + } 290 + 291 + static inline void scaler_set_csc(struct scaler_context *scaler, 292 + const struct drm_format_info *fmt) 293 + { 294 + static const u32 csc_mtx[2][3][3] = { 295 + { /* YCbCr to RGB */ 296 + {0x254, 0x000, 0x331}, 297 + {0x254, 0xf38, 0xe60}, 298 + {0x254, 0x409, 0x000}, 299 + }, 300 + { /* RGB to YCbCr */ 301 + {0x084, 0x102, 0x032}, 302 + {0xfb4, 0xf6b, 0x0e1}, 303 + {0x0e1, 0xf44, 0xfdc}, 304 + }, 305 + }; 306 + int i, j, dir; 307 + 308 + switch (fmt->format) { 309 + case DRM_FORMAT_RGB565: 310 + case DRM_FORMAT_XRGB1555: 311 + case DRM_FORMAT_ARGB1555: 312 + case DRM_FORMAT_XRGB4444: 313 + case DRM_FORMAT_ARGB4444: 314 
+ case DRM_FORMAT_XRGB8888: 315 + case DRM_FORMAT_ARGB8888: 316 + case DRM_FORMAT_RGBX8888: 317 + case DRM_FORMAT_RGBA8888: 318 + dir = 1; 319 + break; 320 + default: 321 + dir = 0; 322 + } 323 + 324 + for (i = 0; i < 3; i++) 325 + for (j = 0; j < 3; j++) 326 + scaler_write(csc_mtx[dir][i][j], SCALER_CSC_COEF(j, i)); 327 + } 328 + 329 + static inline void scaler_set_timer(struct scaler_context *scaler, 330 + unsigned int timer, unsigned int divider) 331 + { 332 + u32 val; 333 + 334 + val = SCALER_TIMEOUT_CTRL_TIMER_ENABLE; 335 + val |= SCALER_TIMEOUT_CTRL_SET_TIMER_VALUE(timer); 336 + val |= SCALER_TIMEOUT_CTRL_SET_TIMER_DIV(divider); 337 + scaler_write(val, SCALER_TIMEOUT_CTRL); 338 + } 339 + 340 + static inline void scaler_start_hw(struct scaler_context *scaler) 341 + { 342 + scaler_write(SCALER_CFG_START_CMD, SCALER_CFG); 343 + } 344 + 345 + static int scaler_commit(struct exynos_drm_ipp *ipp, 346 + struct exynos_drm_ipp_task *task) 347 + { 348 + struct scaler_context *scaler = 349 + container_of(ipp, struct scaler_context, ipp); 350 + 351 + u32 src_fmt = scaler_get_format(task->src.buf.fourcc); 352 + struct drm_exynos_ipp_task_rect *src_pos = &task->src.rect; 353 + 354 + u32 dst_fmt = scaler_get_format(task->dst.buf.fourcc); 355 + struct drm_exynos_ipp_task_rect *dst_pos = &task->dst.rect; 356 + 357 + scaler->task = task; 358 + 359 + pm_runtime_get_sync(scaler->dev); 360 + 361 + scaler_set_src_fmt(scaler, src_fmt); 362 + scaler_set_src_base(scaler, &task->src); 363 + scaler_set_src_span(scaler, &task->src); 364 + scaler_set_src_luma_pos(scaler, src_pos); 365 + scaler_set_src_wh(scaler, src_pos); 366 + 367 + scaler_set_dst_fmt(scaler, dst_fmt); 368 + scaler_set_dst_base(scaler, &task->dst); 369 + scaler_set_dst_span(scaler, &task->dst); 370 + scaler_set_dst_luma_pos(scaler, dst_pos); 371 + scaler_set_dst_wh(scaler, dst_pos); 372 + 373 + scaler_set_hv_ratio(scaler, task->transform.rotation, src_pos, dst_pos); 374 + scaler_set_rotation(scaler, 
task->transform.rotation); 375 + 376 + scaler_set_csc(scaler, task->src.format); 377 + 378 + scaler_set_timer(scaler, 0xffff, 0xf); 379 + 380 + scaler_enable_int(scaler); 381 + scaler_start_hw(scaler); 382 + 383 + return 0; 384 + } 385 + 386 + static struct exynos_drm_ipp_funcs ipp_funcs = { 387 + .commit = scaler_commit, 388 + }; 389 + 390 + static inline void scaler_disable_int(struct scaler_context *scaler) 391 + { 392 + scaler_write(0, SCALER_INT_EN); 393 + } 394 + 395 + static inline u32 scaler_get_int_status(struct scaler_context *scaler) 396 + { 397 + return scaler_read(SCALER_INT_STATUS); 398 + } 399 + 400 + static inline bool scaler_task_done(u32 val) 401 + { 402 + return val & SCALER_INT_STATUS_FRAME_END ? 0 : -EINVAL; 403 + } 404 + 405 + static irqreturn_t scaler_irq_handler(int irq, void *arg) 406 + { 407 + struct scaler_context *scaler = arg; 408 + 409 + u32 val = scaler_get_int_status(scaler); 410 + 411 + scaler_disable_int(scaler); 412 + 413 + if (scaler->task) { 414 + struct exynos_drm_ipp_task *task = scaler->task; 415 + 416 + scaler->task = NULL; 417 + pm_runtime_mark_last_busy(scaler->dev); 418 + pm_runtime_put_autosuspend(scaler->dev); 419 + exynos_drm_ipp_task_done(task, scaler_task_done(val)); 420 + } 421 + 422 + return IRQ_HANDLED; 423 + } 424 + 425 + static int scaler_bind(struct device *dev, struct device *master, void *data) 426 + { 427 + struct scaler_context *scaler = dev_get_drvdata(dev); 428 + struct drm_device *drm_dev = data; 429 + struct exynos_drm_ipp *ipp = &scaler->ipp; 430 + 431 + scaler->drm_dev = drm_dev; 432 + drm_iommu_attach_device(drm_dev, dev); 433 + 434 + exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs, 435 + DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE | 436 + DRM_EXYNOS_IPP_CAP_SCALE | DRM_EXYNOS_IPP_CAP_CONVERT, 437 + scaler->scaler_data->formats, 438 + scaler->scaler_data->num_formats, "scaler"); 439 + 440 + dev_info(dev, "The exynos scaler has been probed successfully\n"); 441 + 442 + return 0; 443 + } 444 + 
445 + static void scaler_unbind(struct device *dev, struct device *master, 446 + void *data) 447 + { 448 + struct scaler_context *scaler = dev_get_drvdata(dev); 449 + struct drm_device *drm_dev = data; 450 + struct exynos_drm_ipp *ipp = &scaler->ipp; 451 + 452 + exynos_drm_ipp_unregister(drm_dev, ipp); 453 + drm_iommu_detach_device(scaler->drm_dev, scaler->dev); 454 + } 455 + 456 + static const struct component_ops scaler_component_ops = { 457 + .bind = scaler_bind, 458 + .unbind = scaler_unbind, 459 + }; 460 + 461 + static int scaler_probe(struct platform_device *pdev) 462 + { 463 + struct device *dev = &pdev->dev; 464 + struct resource *regs_res; 465 + struct scaler_context *scaler; 466 + int irq; 467 + int ret, i; 468 + 469 + scaler = devm_kzalloc(dev, sizeof(*scaler), GFP_KERNEL); 470 + if (!scaler) 471 + return -ENOMEM; 472 + 473 + scaler->scaler_data = 474 + (struct scaler_data *)of_device_get_match_data(dev); 475 + 476 + scaler->dev = dev; 477 + regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 478 + scaler->regs = devm_ioremap_resource(dev, regs_res); 479 + if (IS_ERR(scaler->regs)) 480 + return PTR_ERR(scaler->regs); 481 + 482 + irq = platform_get_irq(pdev, 0); 483 + if (irq < 0) { 484 + dev_err(dev, "failed to get irq\n"); 485 + return irq; 486 + } 487 + 488 + ret = devm_request_threaded_irq(dev, irq, NULL, scaler_irq_handler, 489 + IRQF_ONESHOT, "drm_scaler", scaler); 490 + if (ret < 0) { 491 + dev_err(dev, "failed to request irq\n"); 492 + return ret; 493 + } 494 + 495 + for (i = 0; i < scaler->scaler_data->num_clk; ++i) { 496 + scaler->clock[i] = devm_clk_get(dev, 497 + scaler->scaler_data->clk_name[i]); 498 + if (IS_ERR(scaler->clock[i])) { 499 + dev_err(dev, "failed to get clock\n"); 500 + return PTR_ERR(scaler->clock[i]); 501 + } 502 + } 503 + 504 + pm_runtime_use_autosuspend(dev); 505 + pm_runtime_set_autosuspend_delay(dev, SCALER_AUTOSUSPEND_DELAY); 506 + pm_runtime_enable(dev); 507 + platform_set_drvdata(pdev, scaler); 508 + 509 + ret = 
component_add(dev, &scaler_component_ops); 510 + if (ret) 511 + goto err_ippdrv_register; 512 + 513 + return 0; 514 + 515 + err_ippdrv_register: 516 + pm_runtime_dont_use_autosuspend(dev); 517 + pm_runtime_disable(dev); 518 + return ret; 519 + } 520 + 521 + static int scaler_remove(struct platform_device *pdev) 522 + { 523 + struct device *dev = &pdev->dev; 524 + 525 + component_del(dev, &scaler_component_ops); 526 + pm_runtime_dont_use_autosuspend(dev); 527 + pm_runtime_disable(dev); 528 + 529 + return 0; 530 + } 531 + 532 + #ifdef CONFIG_PM 533 + 534 + static int clk_disable_unprepare_wrapper(struct clk *clk) 535 + { 536 + clk_disable_unprepare(clk); 537 + 538 + return 0; 539 + } 540 + 541 + static int scaler_clk_ctrl(struct scaler_context *scaler, bool enable) 542 + { 543 + int (*clk_fun)(struct clk *clk), i; 544 + 545 + clk_fun = enable ? clk_prepare_enable : clk_disable_unprepare_wrapper; 546 + 547 + for (i = 0; i < scaler->scaler_data->num_clk; ++i) 548 + clk_fun(scaler->clock[i]); 549 + 550 + return 0; 551 + } 552 + 553 + static int scaler_runtime_suspend(struct device *dev) 554 + { 555 + struct scaler_context *scaler = dev_get_drvdata(dev); 556 + 557 + return scaler_clk_ctrl(scaler, false); 558 + } 559 + 560 + static int scaler_runtime_resume(struct device *dev) 561 + { 562 + struct scaler_context *scaler = dev_get_drvdata(dev); 563 + 564 + return scaler_clk_ctrl(scaler, true); 565 + } 566 + #endif 567 + 568 + static const struct dev_pm_ops scaler_pm_ops = { 569 + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, 570 + pm_runtime_force_resume) 571 + SET_RUNTIME_PM_OPS(scaler_runtime_suspend, scaler_runtime_resume, NULL) 572 + }; 573 + 574 + static const struct drm_exynos_ipp_limit scaler_5420_two_pixel_hv_limits[] = { 575 + { IPP_SIZE_LIMIT(BUFFER, .h = { 16, SZ_8K }, .v = { 16, SZ_8K }) }, 576 + { IPP_SIZE_LIMIT(AREA, .h.align = 2, .v.align = 2) }, 577 + { IPP_SCALE_LIMIT(.h = { 65536 * 1 / 4, 65536 * 16 }, 578 + .v = { 65536 * 1 / 4, 65536 * 16 }) }, 579 
+ }; 580 + 581 + static const struct drm_exynos_ipp_limit scaler_5420_two_pixel_h_limits[] = { 582 + { IPP_SIZE_LIMIT(BUFFER, .h = { 16, SZ_8K }, .v = { 16, SZ_8K }) }, 583 + { IPP_SIZE_LIMIT(AREA, .h.align = 2, .v.align = 1) }, 584 + { IPP_SCALE_LIMIT(.h = { 65536 * 1 / 4, 65536 * 16 }, 585 + .v = { 65536 * 1 / 4, 65536 * 16 }) }, 586 + }; 587 + 588 + static const struct drm_exynos_ipp_limit scaler_5420_one_pixel_limits[] = { 589 + { IPP_SIZE_LIMIT(BUFFER, .h = { 16, SZ_8K }, .v = { 16, SZ_8K }) }, 590 + { IPP_SCALE_LIMIT(.h = { 65536 * 1 / 4, 65536 * 16 }, 591 + .v = { 65536 * 1 / 4, 65536 * 16 }) }, 592 + }; 593 + 594 + static const struct exynos_drm_ipp_formats exynos5420_formats[] = { 595 + /* SCALER_YUV420_2P_UV */ 596 + { IPP_SRCDST_FORMAT(NV21, scaler_5420_two_pixel_hv_limits) }, 597 + 598 + /* SCALER_YUV420_2P_VU */ 599 + { IPP_SRCDST_FORMAT(NV12, scaler_5420_two_pixel_hv_limits) }, 600 + 601 + /* SCALER_YUV420_3P */ 602 + { IPP_SRCDST_FORMAT(YUV420, scaler_5420_two_pixel_hv_limits) }, 603 + 604 + /* SCALER_YUV422_1P_YUYV */ 605 + { IPP_SRCDST_FORMAT(YUYV, scaler_5420_two_pixel_h_limits) }, 606 + 607 + /* SCALER_YUV422_1P_UYVY */ 608 + { IPP_SRCDST_FORMAT(UYVY, scaler_5420_two_pixel_h_limits) }, 609 + 610 + /* SCALER_YUV422_1P_YVYU */ 611 + { IPP_SRCDST_FORMAT(YVYU, scaler_5420_two_pixel_h_limits) }, 612 + 613 + /* SCALER_YUV422_2P_UV */ 614 + { IPP_SRCDST_FORMAT(NV61, scaler_5420_two_pixel_h_limits) }, 615 + 616 + /* SCALER_YUV422_2P_VU */ 617 + { IPP_SRCDST_FORMAT(NV16, scaler_5420_two_pixel_h_limits) }, 618 + 619 + /* SCALER_YUV422_3P */ 620 + { IPP_SRCDST_FORMAT(YUV422, scaler_5420_two_pixel_h_limits) }, 621 + 622 + /* SCALER_YUV444_2P_UV */ 623 + { IPP_SRCDST_FORMAT(NV42, scaler_5420_one_pixel_limits) }, 624 + 625 + /* SCALER_YUV444_2P_VU */ 626 + { IPP_SRCDST_FORMAT(NV24, scaler_5420_one_pixel_limits) }, 627 + 628 + /* SCALER_YUV444_3P */ 629 + { IPP_SRCDST_FORMAT(YUV444, scaler_5420_one_pixel_limits) }, 630 + 631 + /* SCALER_RGB_565 */ 632 + { 
IPP_SRCDST_FORMAT(RGB565, scaler_5420_one_pixel_limits) }, 633 + 634 + /* SCALER_ARGB1555 */ 635 + { IPP_SRCDST_FORMAT(XRGB1555, scaler_5420_one_pixel_limits) }, 636 + 637 + /* SCALER_ARGB1555 */ 638 + { IPP_SRCDST_FORMAT(ARGB1555, scaler_5420_one_pixel_limits) }, 639 + 640 + /* SCALER_ARGB4444 */ 641 + { IPP_SRCDST_FORMAT(XRGB4444, scaler_5420_one_pixel_limits) }, 642 + 643 + /* SCALER_ARGB4444 */ 644 + { IPP_SRCDST_FORMAT(ARGB4444, scaler_5420_one_pixel_limits) }, 645 + 646 + /* SCALER_ARGB8888 */ 647 + { IPP_SRCDST_FORMAT(XRGB8888, scaler_5420_one_pixel_limits) }, 648 + 649 + /* SCALER_ARGB8888 */ 650 + { IPP_SRCDST_FORMAT(ARGB8888, scaler_5420_one_pixel_limits) }, 651 + 652 + /* SCALER_RGBA8888 */ 653 + { IPP_SRCDST_FORMAT(RGBX8888, scaler_5420_one_pixel_limits) }, 654 + 655 + /* SCALER_RGBA8888 */ 656 + { IPP_SRCDST_FORMAT(RGBA8888, scaler_5420_one_pixel_limits) }, 657 + }; 658 + 659 + static const struct scaler_data exynos5420_data = { 660 + .clk_name = {"mscl"}, 661 + .num_clk = 1, 662 + .formats = exynos5420_formats, 663 + .num_formats = ARRAY_SIZE(exynos5420_formats), 664 + }; 665 + 666 + static const struct scaler_data exynos5433_data = { 667 + .clk_name = {"pclk", "aclk", "aclk_xiu"}, 668 + .num_clk = 3, 669 + .formats = exynos5420_formats, /* intentional */ 670 + .num_formats = ARRAY_SIZE(exynos5420_formats), 671 + }; 672 + 673 + static const struct of_device_id exynos_scaler_match[] = { 674 + { 675 + .compatible = "samsung,exynos5420-scaler", 676 + .data = &exynos5420_data, 677 + }, { 678 + .compatible = "samsung,exynos5433-scaler", 679 + .data = &exynos5433_data, 680 + }, { 681 + }, 682 + }; 683 + MODULE_DEVICE_TABLE(of, exynos_scaler_match); 684 + 685 + struct platform_driver scaler_driver = { 686 + .probe = scaler_probe, 687 + .remove = scaler_remove, 688 + .driver = { 689 + .name = "exynos-scaler", 690 + .owner = THIS_MODULE, 691 + .pm = &scaler_pm_ops, 692 + .of_match_table = exynos_scaler_match, 693 + }, 694 + };
-2
drivers/gpu/drm/exynos/exynos_hdmi.c
··· 954 954 drm_mode_connector_attach_encoder(connector, encoder); 955 955 956 956 if (hdata->bridge) { 957 - encoder->bridge = hdata->bridge; 958 - hdata->bridge->encoder = encoder; 959 957 ret = drm_bridge_attach(encoder, hdata->bridge, NULL); 960 958 if (ret) 961 959 DRM_ERROR("Failed to attach bridge\n");
+17 -5
drivers/gpu/drm/exynos/exynos_mixer.c
··· 473 473 chroma_addr[1] = chroma_addr[0] + 0x40; 474 474 } else { 475 475 luma_addr[1] = luma_addr[0] + fb->pitches[0]; 476 - chroma_addr[1] = chroma_addr[0] + fb->pitches[0]; 476 + chroma_addr[1] = chroma_addr[0] + fb->pitches[1]; 477 477 } 478 478 } else { 479 479 luma_addr[1] = 0; ··· 482 482 483 483 spin_lock_irqsave(&ctx->reg_slock, flags); 484 484 485 + vp_reg_write(ctx, VP_SHADOW_UPDATE, 1); 485 486 /* interlace or progressive scan mode */ 486 487 val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? ~0 : 0); 487 488 vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_LINE_SKIP); ··· 496 495 vp_reg_write(ctx, VP_IMG_SIZE_Y, VP_IMG_HSIZE(fb->pitches[0]) | 497 496 VP_IMG_VSIZE(fb->height)); 498 497 /* chroma plane for NV12/NV21 is half the height of the luma plane */ 499 - vp_reg_write(ctx, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[0]) | 498 + vp_reg_write(ctx, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[1]) | 500 499 VP_IMG_VSIZE(fb->height / 2)); 501 500 502 501 vp_reg_write(ctx, VP_SRC_WIDTH, state->src.w); 503 - vp_reg_write(ctx, VP_SRC_HEIGHT, state->src.h); 504 502 vp_reg_write(ctx, VP_SRC_H_POSITION, 505 503 VP_SRC_H_POSITION_VAL(state->src.x)); 506 - vp_reg_write(ctx, VP_SRC_V_POSITION, state->src.y); 507 - 508 504 vp_reg_write(ctx, VP_DST_WIDTH, state->crtc.w); 509 505 vp_reg_write(ctx, VP_DST_H_POSITION, state->crtc.x); 506 + 510 507 if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) { 508 + vp_reg_write(ctx, VP_SRC_HEIGHT, state->src.h / 2); 509 + vp_reg_write(ctx, VP_SRC_V_POSITION, state->src.y / 2); 511 510 vp_reg_write(ctx, VP_DST_HEIGHT, state->crtc.h / 2); 512 511 vp_reg_write(ctx, VP_DST_V_POSITION, state->crtc.y / 2); 513 512 } else { 513 + vp_reg_write(ctx, VP_SRC_HEIGHT, state->src.h); 514 + vp_reg_write(ctx, VP_SRC_V_POSITION, state->src.y); 514 515 vp_reg_write(ctx, VP_DST_HEIGHT, state->crtc.h); 515 516 vp_reg_write(ctx, VP_DST_V_POSITION, state->crtc.y); 516 517 } ··· 702 699 703 700 /* interlace scan need to check shadow register */ 704 701 if 
(test_bit(MXR_BIT_INTERLACE, &ctx->flags)) { 702 + if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) && 703 + vp_reg_read(ctx, VP_SHADOW_UPDATE)) 704 + goto out; 705 + 706 + base = mixer_reg_read(ctx, MXR_CFG); 707 + shadow = mixer_reg_read(ctx, MXR_CFG_S); 708 + if (base != shadow) 709 + goto out; 710 + 705 711 base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0)); 706 712 shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0)); 707 713 if (base != shadow)
+1
drivers/gpu/drm/exynos/regs-mixer.h
··· 47 47 #define MXR_MO 0x0304 48 48 #define MXR_RESOLUTION 0x0310 49 49 50 + #define MXR_CFG_S 0x2004 50 51 #define MXR_GRAPHIC0_BASE_S 0x2024 51 52 #define MXR_GRAPHIC1_BASE_S 0x2044 52 53
+426
drivers/gpu/drm/exynos/regs-scaler.h
··· 1 + /* drivers/gpu/drm/exynos/regs-scaler.h 2 + * 3 + * Copyright (c) 2017 Samsung Electronics Co., Ltd. 4 + * http://www.samsung.com/ 5 + * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com> 6 + * 7 + * Register definition file for Samsung scaler driver 8 + * 9 + * This program is free software; you can redistribute it and/or modify 10 + * it under the terms of the GNU General Public License version 2 as 11 + * published by the Free Software Foundation. 12 + */ 13 + 14 + #ifndef EXYNOS_REGS_SCALER_H 15 + #define EXYNOS_REGS_SCALER_H 16 + 17 + /* Register part */ 18 + 19 + /* Global setting */ 20 + #define SCALER_STATUS 0x0 /* no shadow */ 21 + #define SCALER_CFG 0x4 22 + 23 + /* Interrupt */ 24 + #define SCALER_INT_EN 0x8 /* no shadow */ 25 + #define SCALER_INT_STATUS 0xc /* no shadow */ 26 + 27 + /* SRC */ 28 + #define SCALER_SRC_CFG 0x10 29 + #define SCALER_SRC_Y_BASE 0x14 30 + #define SCALER_SRC_CB_BASE 0x18 31 + #define SCALER_SRC_CR_BASE 0x294 32 + #define SCALER_SRC_SPAN 0x1c 33 + #define SCALER_SRC_Y_POS 0x20 34 + #define SCALER_SRC_WH 0x24 35 + #define SCALER_SRC_C_POS 0x28 36 + 37 + /* DST */ 38 + #define SCALER_DST_CFG 0x30 39 + #define SCALER_DST_Y_BASE 0x34 40 + #define SCALER_DST_CB_BASE 0x38 41 + #define SCALER_DST_CR_BASE 0x298 42 + #define SCALER_DST_SPAN 0x3c 43 + #define SCALER_DST_WH 0x40 44 + #define SCALER_DST_POS 0x44 45 + 46 + /* Ratio */ 47 + #define SCALER_H_RATIO 0x50 48 + #define SCALER_V_RATIO 0x54 49 + 50 + /* Rotation */ 51 + #define SCALER_ROT_CFG 0x58 52 + 53 + /* Coefficient */ 54 + /* 55 + * YHCOEF_{x}{A|B|C|D} CHCOEF_{x}{A|B|C|D} 56 + * 57 + * A B C D A B C D 58 + * 0 60 64 68 6c 140 144 148 14c 59 + * 1 70 74 78 7c 150 154 158 15c 60 + * 2 80 84 88 8c 160 164 168 16c 61 + * 3 90 94 98 9c 170 174 178 17c 62 + * 4 a0 a4 a8 ac 180 184 188 18c 63 + * 5 b0 b4 b8 bc 190 194 198 19c 64 + * 6 c0 c4 c8 cc 1a0 1a4 1a8 1ac 65 + * 7 d0 d4 d8 dc 1b0 1b4 1b8 1bc 66 + * 8 e0 e4 e8 ec 1c0 1c4 1c8 1cc 67 + * 68 + * 69 + * YVCOEF_{x}{A|B} 
CVCOEF_{x}{A|B} 70 + * 71 + * A B A B 72 + * 0 f0 f4 1d0 1d4 73 + * 1 f8 fc 1d8 1dc 74 + * 2 100 104 1e0 1e4 75 + * 3 108 10c 1e8 1ec 76 + * 4 110 114 1f0 1f4 77 + * 5 118 11c 1f8 1fc 78 + * 6 120 124 200 204 79 + * 7 128 12c 208 20c 80 + * 8 130 134 210 214 81 + */ 82 + #define _SCALER_HCOEF_DELTA(r, c) ((r) * 0x10 + (c) * 0x4) 83 + #define _SCALER_VCOEF_DELTA(r, c) ((r) * 0x8 + (c) * 0x4) 84 + 85 + #define SCALER_YHCOEF(r, c) (0x60 + _SCALER_HCOEF_DELTA((r), (c))) 86 + #define SCALER_YVCOEF(r, c) (0xf0 + _SCALER_VCOEF_DELTA((r), (c))) 87 + #define SCALER_CHCOEF(r, c) (0x140 + _SCALER_HCOEF_DELTA((r), (c))) 88 + #define SCALER_CVCOEF(r, c) (0x1d0 + _SCALER_VCOEF_DELTA((r), (c))) 89 + 90 + 91 + /* Color Space Conversion */ 92 + #define SCALER_CSC_COEF(x, y) (0x220 + (y) * 0xc + (x) * 0x4) 93 + 94 + /* Dithering */ 95 + #define SCALER_DITH_CFG 0x250 96 + 97 + /* Version Number */ 98 + #define SCALER_VER 0x260 /* no shadow */ 99 + 100 + /* Cycle count and Timeout */ 101 + #define SCALER_CYCLE_COUNT 0x278 /* no shadow */ 102 + #define SCALER_TIMEOUT_CTRL 0x2c0 /* no shadow */ 103 + #define SCALER_TIMEOUT_CNT 0x2c4 /* no shadow */ 104 + 105 + /* Blending */ 106 + #define SCALER_SRC_BLEND_COLOR 0x280 107 + #define SCALER_SRC_BLEND_ALPHA 0x284 108 + #define SCALER_DST_BLEND_COLOR 0x288 109 + #define SCALER_DST_BLEND_ALPHA 0x28c 110 + 111 + /* Color Fill */ 112 + #define SCALER_FILL_COLOR 0x290 113 + 114 + /* Multiple Command Queue */ 115 + #define SCALER_ADDR_Q_CONFIG 0x2a0 /* no shadow */ 116 + #define SCALER_SRC_ADDR_Q_STATUS 0x2a4 /* no shadow */ 117 + #define SCALER_SRC_ADDR_Q 0x2a8 /* no shadow */ 118 + 119 + /* CRC */ 120 + #define SCALER_CRC_COLOR00_10 0x2b0 /* no shadow */ 121 + #define SCALER_CRC_COLOR20_30 0x2b4 /* no shadow */ 122 + #define SCALER_CRC_COLOR01_11 0x2b8 /* no shadow */ 123 + #define SCALER_CRC_COLOR21_31 0x2bc /* no shadow */ 124 + 125 + /* Shadow Registers */ 126 + #define SCALER_SHADOW_OFFSET 0x1000 127 + 128 + 129 + /* Bit definition part */ 
130 + #define SCALER_MASK(hi_b, lo_b) ((1 << ((hi_b) - (lo_b) + 1)) - 1) 131 + #define SCALER_GET(reg, hi_b, lo_b) \ 132 + (((reg) >> (lo_b)) & SCALER_MASK(hi_b, lo_b)) 133 + #define SCALER_SET(val, hi_b, lo_b) \ 134 + (((val) & SCALER_MASK(hi_b, lo_b)) << lo_b) 135 + 136 + /* SCALER_STATUS */ 137 + #define SCALER_STATUS_SCALER_RUNNING (1 << 1) 138 + #define SCALER_STATUS_SCALER_READY_CLK_DOWN (1 << 0) 139 + 140 + /* SCALER_CFG */ 141 + #define SCALER_CFG_FILL_EN (1 << 24) 142 + #define SCALER_CFG_BLEND_COLOR_DIVIDE_ALPHA_EN (1 << 17) 143 + #define SCALER_CFG_BLEND_EN (1 << 16) 144 + #define SCALER_CFG_CSC_Y_OFFSET_SRC_EN (1 << 10) 145 + #define SCALER_CFG_CSC_Y_OFFSET_DST_EN (1 << 9) 146 + #define SCALER_CFG_16_BURST_MODE (1 << 8) 147 + #define SCALER_CFG_SOFT_RESET (1 << 1) 148 + #define SCALER_CFG_START_CMD (1 << 0) 149 + 150 + /* SCALER_INT_EN */ 151 + #define SCALER_INT_EN_TIMEOUT (1 << 31) 152 + #define SCALER_INT_EN_ILLEGAL_BLEND (1 << 24) 153 + #define SCALER_INT_EN_ILLEGAL_RATIO (1 << 23) 154 + #define SCALER_INT_EN_ILLEGAL_DST_HEIGHT (1 << 22) 155 + #define SCALER_INT_EN_ILLEGAL_DST_WIDTH (1 << 21) 156 + #define SCALER_INT_EN_ILLEGAL_DST_V_POS (1 << 20) 157 + #define SCALER_INT_EN_ILLEGAL_DST_H_POS (1 << 19) 158 + #define SCALER_INT_EN_ILLEGAL_DST_C_SPAN (1 << 18) 159 + #define SCALER_INT_EN_ILLEGAL_DST_Y_SPAN (1 << 17) 160 + #define SCALER_INT_EN_ILLEGAL_DST_CR_BASE (1 << 16) 161 + #define SCALER_INT_EN_ILLEGAL_DST_CB_BASE (1 << 15) 162 + #define SCALER_INT_EN_ILLEGAL_DST_Y_BASE (1 << 14) 163 + #define SCALER_INT_EN_ILLEGAL_DST_COLOR (1 << 13) 164 + #define SCALER_INT_EN_ILLEGAL_SRC_HEIGHT (1 << 12) 165 + #define SCALER_INT_EN_ILLEGAL_SRC_WIDTH (1 << 11) 166 + #define SCALER_INT_EN_ILLEGAL_SRC_CV_POS (1 << 10) 167 + #define SCALER_INT_EN_ILLEGAL_SRC_CH_POS (1 << 9) 168 + #define SCALER_INT_EN_ILLEGAL_SRC_YV_POS (1 << 8) 169 + #define SCALER_INT_EN_ILLEGAL_SRC_YH_POS (1 << 7) 170 + #define SCALER_INT_EN_ILLEGAL_DST_SPAN (1 << 6) 171 + #define 
SCALER_INT_EN_ILLEGAL_SRC_Y_SPAN (1 << 5) 172 + #define SCALER_INT_EN_ILLEGAL_SRC_CR_BASE (1 << 4) 173 + #define SCALER_INT_EN_ILLEGAL_SRC_CB_BASE (1 << 3) 174 + #define SCALER_INT_EN_ILLEGAL_SRC_Y_BASE (1 << 2) 175 + #define SCALER_INT_EN_ILLEGAL_SRC_COLOR (1 << 1) 176 + #define SCALER_INT_EN_FRAME_END (1 << 0) 177 + 178 + /* SCALER_INT_STATUS */ 179 + #define SCALER_INT_STATUS_TIMEOUT (1 << 31) 180 + #define SCALER_INT_STATUS_ILLEGAL_BLEND (1 << 24) 181 + #define SCALER_INT_STATUS_ILLEGAL_RATIO (1 << 23) 182 + #define SCALER_INT_STATUS_ILLEGAL_DST_HEIGHT (1 << 22) 183 + #define SCALER_INT_STATUS_ILLEGAL_DST_WIDTH (1 << 21) 184 + #define SCALER_INT_STATUS_ILLEGAL_DST_V_POS (1 << 20) 185 + #define SCALER_INT_STATUS_ILLEGAL_DST_H_POS (1 << 19) 186 + #define SCALER_INT_STATUS_ILLEGAL_DST_C_SPAN (1 << 18) 187 + #define SCALER_INT_STATUS_ILLEGAL_DST_Y_SPAN (1 << 17) 188 + #define SCALER_INT_STATUS_ILLEGAL_DST_CR_BASE (1 << 16) 189 + #define SCALER_INT_STATUS_ILLEGAL_DST_CB_BASE (1 << 15) 190 + #define SCALER_INT_STATUS_ILLEGAL_DST_Y_BASE (1 << 14) 191 + #define SCALER_INT_STATUS_ILLEGAL_DST_COLOR (1 << 13) 192 + #define SCALER_INT_STATUS_ILLEGAL_SRC_HEIGHT (1 << 12) 193 + #define SCALER_INT_STATUS_ILLEGAL_SRC_WIDTH (1 << 11) 194 + #define SCALER_INT_STATUS_ILLEGAL_SRC_CV_POS (1 << 10) 195 + #define SCALER_INT_STATUS_ILLEGAL_SRC_CH_POS (1 << 9) 196 + #define SCALER_INT_STATUS_ILLEGAL_SRC_YV_POS (1 << 8) 197 + #define SCALER_INT_STATUS_ILLEGAL_SRC_YH_POS (1 << 7) 198 + #define SCALER_INT_STATUS_ILLEGAL_DST_SPAN (1 << 6) 199 + #define SCALER_INT_STATUS_ILLEGAL_SRC_Y_SPAN (1 << 5) 200 + #define SCALER_INT_STATUS_ILLEGAL_SRC_CR_BASE (1 << 4) 201 + #define SCALER_INT_STATUS_ILLEGAL_SRC_CB_BASE (1 << 3) 202 + #define SCALER_INT_STATUS_ILLEGAL_SRC_Y_BASE (1 << 2) 203 + #define SCALER_INT_STATUS_ILLEGAL_SRC_COLOR (1 << 1) 204 + #define SCALER_INT_STATUS_FRAME_END (1 << 0) 205 + 206 + /* SCALER_SRC_CFG */ 207 + #define SCALER_SRC_CFG_TILE_EN (1 << 10) 208 + #define 
SCALER_SRC_CFG_GET_BYTE_SWAP(r) SCALER_GET(r, 6, 5) 209 + #define SCALER_SRC_CFG_SET_BYTE_SWAP(v) SCALER_SET(v, 6, 5) 210 + #define SCALER_SRC_CFG_GET_COLOR_FORMAT(r) SCALER_GET(r, 4, 0) 211 + #define SCALER_SRC_CFG_SET_COLOR_FORMAT(v) SCALER_SET(v, 4, 0) 212 + #define SCALER_YUV420_2P_UV 0 213 + #define SCALER_YUV422_2P_UV 2 214 + #define SCALER_YUV444_2P_UV 3 215 + #define SCALER_RGB_565 4 216 + #define SCALER_ARGB1555 5 217 + #define SCALER_ARGB8888 6 218 + #define SCALER_ARGB8888_PRE 7 219 + #define SCALER_YUV422_1P_YVYU 9 220 + #define SCALER_YUV422_1P_YUYV 10 221 + #define SCALER_YUV422_1P_UYVY 11 222 + #define SCALER_ARGB4444 12 223 + #define SCALER_L8A8 13 224 + #define SCALER_RGBA8888 14 225 + #define SCALER_L8 15 226 + #define SCALER_YUV420_2P_VU 16 227 + #define SCALER_YUV422_2P_VU 18 228 + #define SCALER_YUV444_2P_VU 19 229 + #define SCALER_YUV420_3P 20 230 + #define SCALER_YUV422_3P 22 231 + #define SCALER_YUV444_3P 23 232 + 233 + /* SCALER_SRC_SPAN */ 234 + #define SCALER_SRC_SPAN_GET_C_SPAN(r) SCALER_GET(r, 29, 16) 235 + #define SCALER_SRC_SPAN_SET_C_SPAN(v) SCALER_SET(v, 29, 16) 236 + #define SCALER_SRC_SPAN_GET_Y_SPAN(r) SCALER_GET(r, 13, 0) 237 + #define SCALER_SRC_SPAN_SET_Y_SPAN(v) SCALER_SET(v, 13, 0) 238 + 239 + /* SCALER_SRC_Y_POS */ 240 + #define SCALER_SRC_Y_POS_GET_YH_POS(r) SCALER_GET(r, 31, 16) 241 + #define SCALER_SRC_Y_POS_SET_YH_POS(v) SCALER_SET(v, 31, 16) 242 + #define SCALER_SRC_Y_POS_GET_YV_POS(r) SCALER_GET(r, 15, 0) 243 + #define SCALER_SRC_Y_POS_SET_YV_POS(v) SCALER_SET(v, 15, 0) 244 + 245 + /* SCALER_SRC_WH */ 246 + #define SCALER_SRC_WH_GET_WIDTH(r) SCALER_GET(r, 29, 16) 247 + #define SCALER_SRC_WH_SET_WIDTH(v) SCALER_SET(v, 29, 16) 248 + #define SCALER_SRC_WH_GET_HEIGHT(r) SCALER_GET(r, 13, 0) 249 + #define SCALER_SRC_WH_SET_HEIGHT(v) SCALER_SET(v, 13, 0) 250 + 251 + /* SCALER_SRC_C_POS */ 252 + #define SCALER_SRC_C_POS_GET_CH_POS(r) SCALER_GET(r, 31, 16) 253 + #define SCALER_SRC_C_POS_SET_CH_POS(v) SCALER_SET(v, 31, 16) 254 
+ #define SCALER_SRC_C_POS_GET_CV_POS(r) SCALER_GET(r, 15, 0) 255 + #define SCALER_SRC_C_POS_SET_CV_POS(v) SCALER_SET(v, 15, 0) 256 + 257 + /* SCALER_DST_CFG */ 258 + #define SCALER_DST_CFG_GET_BYTE_SWAP(r) SCALER_GET(r, 6, 5) 259 + #define SCALER_DST_CFG_SET_BYTE_SWAP(v) SCALER_SET(v, 6, 5) 260 + #define SCALER_DST_CFG_GET_COLOR_FORMAT(r) SCALER_GET(r, 4, 0) 261 + #define SCALER_DST_CFG_SET_COLOR_FORMAT(v) SCALER_SET(v, 4, 0) 262 + 263 + /* SCALER_DST_SPAN */ 264 + #define SCALER_DST_SPAN_GET_C_SPAN(r) SCALER_GET(r, 29, 16) 265 + #define SCALER_DST_SPAN_SET_C_SPAN(v) SCALER_SET(v, 29, 16) 266 + #define SCALER_DST_SPAN_GET_Y_SPAN(r) SCALER_GET(r, 13, 0) 267 + #define SCALER_DST_SPAN_SET_Y_SPAN(v) SCALER_SET(v, 13, 0) 268 + 269 + /* SCALER_DST_WH */ 270 + #define SCALER_DST_WH_GET_WIDTH(r) SCALER_GET(r, 29, 16) 271 + #define SCALER_DST_WH_SET_WIDTH(v) SCALER_SET(v, 29, 16) 272 + #define SCALER_DST_WH_GET_HEIGHT(r) SCALER_GET(r, 13, 0) 273 + #define SCALER_DST_WH_SET_HEIGHT(v) SCALER_SET(v, 13, 0) 274 + 275 + /* SCALER_DST_POS */ 276 + #define SCALER_DST_POS_GET_H_POS(r) SCALER_GET(r, 29, 16) 277 + #define SCALER_DST_POS_SET_H_POS(v) SCALER_SET(v, 29, 16) 278 + #define SCALER_DST_POS_GET_V_POS(r) SCALER_GET(r, 13, 0) 279 + #define SCALER_DST_POS_SET_V_POS(v) SCALER_SET(v, 13, 0) 280 + 281 + /* SCALER_H_RATIO */ 282 + #define SCALER_H_RATIO_GET(r) SCALER_GET(r, 18, 0) 283 + #define SCALER_H_RATIO_SET(v) SCALER_SET(v, 18, 0) 284 + 285 + /* SCALER_V_RATIO */ 286 + #define SCALER_V_RATIO_GET(r) SCALER_GET(r, 18, 0) 287 + #define SCALER_V_RATIO_SET(v) SCALER_SET(v, 18, 0) 288 + 289 + /* SCALER_ROT_CFG */ 290 + #define SCALER_ROT_CFG_FLIP_X_EN (1 << 3) 291 + #define SCALER_ROT_CFG_FLIP_Y_EN (1 << 2) 292 + #define SCALER_ROT_CFG_GET_ROTMODE(r) SCALER_GET(r, 1, 0) 293 + #define SCALER_ROT_CFG_SET_ROTMODE(v) SCALER_SET(v, 1, 0) 294 + #define SCALER_ROT_MODE_90 1 295 + #define SCALER_ROT_MODE_180 2 296 + #define SCALER_ROT_MODE_270 3 297 + 298 + /* SCALER_HCOEF, SCALER_VCOEF 
*/ 299 + #define SCALER_COEF_SHIFT(i) (16 * (1 - (i) % 2)) 300 + #define SCALER_COEF_GET(r, i) \ 301 + (((r) >> SCALER_COEF_SHIFT(i)) & 0x1ff) 302 + #define SCALER_COEF_SET(v, i) \ 303 + (((v) & 0x1ff) << SCALER_COEF_SHIFT(i)) 304 + 305 + /* SCALER_CSC_COEFxy */ 306 + #define SCALER_CSC_COEF_GET(r) SCALER_GET(r, 11, 0) 307 + #define SCALER_CSC_COEF_SET(v) SCALER_SET(v, 11, 0) 308 + 309 + /* SCALER_DITH_CFG */ 310 + #define SCALER_DITH_CFG_GET_R_TYPE(r) SCALER_GET(r, 8, 6) 311 + #define SCALER_DITH_CFG_SET_R_TYPE(v) SCALER_SET(v, 8, 6) 312 + #define SCALER_DITH_CFG_GET_G_TYPE(r) SCALER_GET(r, 5, 3) 313 + #define SCALER_DITH_CFG_SET_G_TYPE(v) SCALER_SET(v, 5, 3) 314 + #define SCALER_DITH_CFG_GET_B_TYPE(r) SCALER_GET(r, 2, 0) 315 + #define SCALER_DITH_CFG_SET_B_TYPE(v) SCALER_SET(v, 2, 0) 316 + 317 + /* SCALER_TIMEOUT_CTRL */ 318 + #define SCALER_TIMEOUT_CTRL_GET_TIMER_VALUE(r) SCALER_GET(r, 31, 16) 319 + #define SCALER_TIMEOUT_CTRL_SET_TIMER_VALUE(v) SCALER_SET(v, 31, 16) 320 + #define SCALER_TIMEOUT_CTRL_GET_TIMER_DIV(r) SCALER_GET(r, 7, 4) 321 + #define SCALER_TIMEOUT_CTRL_SET_TIMER_DIV(v) SCALER_SET(v, 7, 4) 322 + #define SCALER_TIMEOUT_CTRL_TIMER_ENABLE (1 << 0) 323 + 324 + /* SCALER_TIMEOUT_CNT */ 325 + #define SCALER_TIMEOUT_CTRL_GET_TIMER_COUNT(r) SCALER_GET(r, 31, 16) 326 + 327 + /* SCALER_SRC_BLEND_COLOR */ 328 + #define SCALER_SRC_BLEND_COLOR_SEL_INV (1 << 31) 329 + #define SCALER_SRC_BLEND_COLOR_GET_SEL(r) SCALER_GET(r, 30, 29) 330 + #define SCALER_SRC_BLEND_COLOR_SET_SEL(v) SCALER_SET(v, 30, 29) 331 + #define SCALER_SRC_BLEND_COLOR_OP_SEL_INV (1 << 28) 332 + #define SCALER_SRC_BLEND_COLOR_GET_OP_SEL(r) SCALER_GET(r, 27, 24) 333 + #define SCALER_SRC_BLEND_COLOR_SET_OP_SEL(v) SCALER_SET(v, 27, 24) 334 + #define SCALER_SRC_BLEND_COLOR_GET_COLOR0(r) SCALER_GET(r, 23, 16) 335 + #define SCALER_SRC_BLEND_COLOR_SET_COLOR0(v) SCALER_SET(v, 23, 16) 336 + #define SCALER_SRC_BLEND_COLOR_GET_COLOR1(r) SCALER_GET(r, 15, 8) 337 + #define 
SCALER_SRC_BLEND_COLOR_SET_COLOR1(v) SCALER_SET(v, 15, 8) 338 + #define SCALER_SRC_BLEND_COLOR_GET_COLOR2(r) SCALER_GET(r, 7, 0) 339 + #define SCALER_SRC_BLEND_COLOR_SET_COLOR2(v) SCALER_SET(v, 7, 0) 340 + 341 + /* SCALER_SRC_BLEND_ALPHA */ 342 + #define SCALER_SRC_BLEND_ALPHA_SEL_INV (1 << 31) 343 + #define SCALER_SRC_BLEND_ALPHA_GET_SEL(r) SCALER_GET(r, 30, 29) 344 + #define SCALER_SRC_BLEND_ALPHA_SET_SEL(v) SCALER_SET(v, 30, 29) 345 + #define SCALER_SRC_BLEND_ALPHA_OP_SEL_INV (1 << 28) 346 + #define SCALER_SRC_BLEND_ALPHA_GET_OP_SEL(r) SCALER_GET(r, 27, 24) 347 + #define SCALER_SRC_BLEND_ALPHA_SET_OP_SEL(v) SCALER_SET(v, 27, 24) 348 + #define SCALER_SRC_BLEND_ALPHA_GET_ALPHA(r) SCALER_GET(r, 7, 0) 349 + #define SCALER_SRC_BLEND_ALPHA_SET_ALPHA(v) SCALER_SET(v, 7, 0) 350 + 351 + /* SCALER_DST_BLEND_COLOR */ 352 + #define SCALER_DST_BLEND_COLOR_SEL_INV (1 << 31) 353 + #define SCALER_DST_BLEND_COLOR_GET_SEL(r) SCALER_GET(r, 30, 29) 354 + #define SCALER_DST_BLEND_COLOR_SET_SEL(v) SCALER_SET(v, 30, 29) 355 + #define SCALER_DST_BLEND_COLOR_OP_SEL_INV (1 << 28) 356 + #define SCALER_DST_BLEND_COLOR_GET_OP_SEL(r) SCALER_GET(r, 27, 24) 357 + #define SCALER_DST_BLEND_COLOR_SET_OP_SEL(v) SCALER_SET(v, 27, 24) 358 + #define SCALER_DST_BLEND_COLOR_GET_COLOR0(r) SCALER_GET(r, 23, 16) 359 + #define SCALER_DST_BLEND_COLOR_SET_COLOR0(v) SCALER_SET(v, 23, 16) 360 + #define SCALER_DST_BLEND_COLOR_GET_COLOR1(r) SCALER_GET(r, 15, 8) 361 + #define SCALER_DST_BLEND_COLOR_SET_COLOR1(v) SCALER_SET(v, 15, 8) 362 + #define SCALER_DST_BLEND_COLOR_GET_COLOR2(r) SCALER_GET(r, 7, 0) 363 + #define SCALER_DST_BLEND_COLOR_SET_COLOR2(v) SCALER_SET(v, 7, 0) 364 + 365 + /* SCALER_DST_BLEND_ALPHA */ 366 + #define SCALER_DST_BLEND_ALPHA_SEL_INV (1 << 31) 367 + #define SCALER_DST_BLEND_ALPHA_GET_SEL(r) SCALER_GET(r, 30, 29) 368 + #define SCALER_DST_BLEND_ALPHA_SET_SEL(v) SCALER_SET(v, 30, 29) 369 + #define SCALER_DST_BLEND_ALPHA_OP_SEL_INV (1 << 28) 370 + #define SCALER_DST_BLEND_ALPHA_GET_OP_SEL(r) 
SCALER_GET(r, 27, 24) 371 + #define SCALER_DST_BLEND_ALPHA_SET_OP_SEL(v) SCALER_SET(v, 27, 24) 372 + #define SCALER_DST_BLEND_ALPHA_GET_ALPHA(r) SCALER_GET(r, 7, 0) 373 + #define SCALER_DST_BLEND_ALPHA_SET_ALPHA(v) SCALER_SET(v, 7, 0) 374 + 375 + /* SCALER_FILL_COLOR */ 376 + #define SCALER_FILL_COLOR_GET_ALPHA(r) SCALER_GET(r, 31, 24) 377 + #define SCALER_FILL_COLOR_SET_ALPHA(v) SCALER_SET(v, 31, 24) 378 + #define SCALER_FILL_COLOR_GET_FILL_COLOR0(r) SCALER_GET(r, 23, 16) 379 + #define SCALER_FILL_COLOR_SET_FILL_COLOR0(v) SCALER_SET(v, 23, 16) 380 + #define SCALER_FILL_COLOR_GET_FILL_COLOR1(r) SCALER_GET(r, 15, 8) 381 + #define SCALER_FILL_COLOR_SET_FILL_COLOR1(v) SCALER_SET(v, 15, 8) 382 + #define SCALER_FILL_COLOR_GET_FILL_COLOR2(r) SCALER_GET(r, 7, 0) 383 + #define SCALER_FILL_COLOR_SET_FILL_COLOR2(v) SCALER_SET(v, 7, 0) 384 + 385 + /* SCALER_ADDR_Q_CONFIG */ 386 + #define SCALER_ADDR_Q_CONFIG_RST (1 << 0) 387 + 388 + /* SCALER_SRC_ADDR_Q_STATUS */ 389 + #define SCALER_SRC_ADDR_Q_STATUS_Y_FULL (1 << 23) 390 + #define SCALER_SRC_ADDR_Q_STATUS_Y_EMPTY (1 << 22) 391 + #define SCALER_SRC_ADDR_Q_STATUS_GET_Y_WR_IDX(r) SCALER_GET(r, 21, 16) 392 + #define SCALER_SRC_ADDR_Q_STATUS_CB_FULL (1 << 15) 393 + #define SCALER_SRC_ADDR_Q_STATUS_CB_EMPTY (1 << 14) 394 + #define SCALER_SRC_ADDR_Q_STATUS_GET_CB_WR_IDX(r) SCALER_GET(r, 13, 8) 395 + #define SCALER_SRC_ADDR_Q_STATUS_CR_FULL (1 << 7) 396 + #define SCALER_SRC_ADDR_Q_STATUS_CR_EMPTY (1 << 6) 397 + #define SCALER_SRC_ADDR_Q_STATUS_GET_CR_WR_IDX(r) SCALER_GET(r, 5, 0) 398 + 399 + /* SCALER_DST_ADDR_Q_STATUS */ 400 + #define SCALER_DST_ADDR_Q_STATUS_Y_FULL (1 << 23) 401 + #define SCALER_DST_ADDR_Q_STATUS_Y_EMPTY (1 << 22) 402 + #define SCALER_DST_ADDR_Q_STATUS_GET_Y_WR_IDX(r) SCALER_GET(r, 21, 16) 403 + #define SCALER_DST_ADDR_Q_STATUS_CB_FULL (1 << 15) 404 + #define SCALER_DST_ADDR_Q_STATUS_CB_EMPTY (1 << 14) 405 + #define SCALER_DST_ADDR_Q_STATUS_GET_CB_WR_IDX(r) SCALER_GET(r, 13, 8) 406 + #define 
SCALER_DST_ADDR_Q_STATUS_CR_FULL (1 << 7) 407 + #define SCALER_DST_ADDR_Q_STATUS_CR_EMPTY (1 << 6) 408 + #define SCALER_DST_ADDR_Q_STATUS_GET_CR_WR_IDX(r) SCALER_GET(r, 5, 0) 409 + 410 + /* SCALER_CRC_COLOR00_10 */ 411 + #define SCALER_CRC_COLOR00_10_GET_00(r) SCALER_GET(r, 31, 16) 412 + #define SCALER_CRC_COLOR00_10_GET_10(r) SCALER_GET(r, 15, 0) 413 + 414 + /* SCALER_CRC_COLOR20_30 */ 415 + #define SCALER_CRC_COLOR20_30_GET_20(r) SCALER_GET(r, 31, 16) 416 + #define SCALER_CRC_COLOR20_30_GET_30(r) SCALER_GET(r, 15, 0) 417 + 418 + /* SCALER_CRC_COLOR01_11 */ 419 + #define SCALER_CRC_COLOR01_11_GET_01(r) SCALER_GET(r, 31, 16) 420 + #define SCALER_CRC_COLOR01_11_GET_11(r) SCALER_GET(r, 15, 0) 421 + 422 + /* SCALER_CRC_COLOR21_31 */ 423 + #define SCALER_CRC_COLOR21_31_GET_21(r) SCALER_GET(r, 31, 16) 424 + #define SCALER_CRC_COLOR21_31_GET_31(r) SCALER_GET(r, 15, 0) 425 + 426 + #endif /* EXYNOS_REGS_SCALER_H */
+1
drivers/gpu/drm/i915/intel_csr.c
··· 35 35 */ 36 36 37 37 #define I915_CSR_GLK "i915/glk_dmc_ver1_04.bin" 38 + MODULE_FIRMWARE(I915_CSR_GLK); 38 39 #define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4) 39 40 40 41 #define I915_CSR_CNL "i915/cnl_dmc_ver1_07.bin"
+45 -1
drivers/gpu/drm/vc4/vc4_crtc.c
··· 741 741 struct vc4_async_flip_state { 742 742 struct drm_crtc *crtc; 743 743 struct drm_framebuffer *fb; 744 + struct drm_framebuffer *old_fb; 744 745 struct drm_pending_vblank_event *event; 745 746 746 747 struct vc4_seqno_cb cb; ··· 771 770 772 771 drm_crtc_vblank_put(crtc); 773 772 drm_framebuffer_put(flip_state->fb); 773 + 774 + /* Decrement the BO usecnt in order to keep the inc/dec calls balanced 775 + * when the planes are updated through the async update path. 776 + * FIXME: we should move to generic async-page-flip when it's 777 + * available, so that we can get rid of this hand-made cleanup_fb() 778 + * logic. 779 + */ 780 + if (flip_state->old_fb) { 781 + struct drm_gem_cma_object *cma_bo; 782 + struct vc4_bo *bo; 783 + 784 + cma_bo = drm_fb_cma_get_gem_obj(flip_state->old_fb, 0); 785 + bo = to_vc4_bo(&cma_bo->base); 786 + vc4_bo_dec_usecnt(bo); 787 + drm_framebuffer_put(flip_state->old_fb); 788 + } 789 + 774 790 kfree(flip_state); 775 791 776 792 up(&vc4->async_modeset); ··· 812 794 struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0); 813 795 struct vc4_bo *bo = to_vc4_bo(&cma_bo->base); 814 796 797 + /* Increment the BO usecnt here, so that we never end up with an 798 + * unbalanced number of vc4_bo_{dec,inc}_usecnt() calls when the 799 + * plane is later updated through the non-async path. 800 + * FIXME: we should move to generic async-page-flip when it's 801 + * available, so that we can get rid of this hand-made prepare_fb() 802 + * logic. 
803 + */ 804 + ret = vc4_bo_inc_usecnt(bo); 805 + if (ret) 806 + return ret; 807 + 815 808 flip_state = kzalloc(sizeof(*flip_state), GFP_KERNEL); 816 - if (!flip_state) 809 + if (!flip_state) { 810 + vc4_bo_dec_usecnt(bo); 817 811 return -ENOMEM; 812 + } 818 813 819 814 drm_framebuffer_get(fb); 820 815 flip_state->fb = fb; ··· 838 807 ret = down_interruptible(&vc4->async_modeset); 839 808 if (ret) { 840 809 drm_framebuffer_put(fb); 810 + vc4_bo_dec_usecnt(bo); 841 811 kfree(flip_state); 842 812 return ret; 843 813 } 814 + 815 + /* Save the current FB before it's replaced by the new one in 816 + * drm_atomic_set_fb_for_plane(). We'll need the old FB in 817 + * vc4_async_page_flip_complete() to decrement the BO usecnt and keep 818 + * it consistent. 819 + * FIXME: we should move to generic async-page-flip when it's 820 + * available, so that we can get rid of this hand-made cleanup_fb() 821 + * logic. 822 + */ 823 + flip_state->old_fb = plane->state->fb; 824 + if (flip_state->old_fb) 825 + drm_framebuffer_get(flip_state->old_fb); 844 826 845 827 WARN_ON(drm_crtc_vblank_get(crtc) != 0); 846 828
+10 -21
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
··· 441 441 struct drm_crtc *crtc = set->crtc; 442 442 struct drm_framebuffer *fb; 443 443 struct drm_crtc *tmp; 444 - struct drm_modeset_acquire_ctx *ctx; 445 444 struct drm_device *dev = set->crtc->dev; 445 + struct drm_modeset_acquire_ctx ctx; 446 446 int ret; 447 447 448 - ctx = dev->mode_config.acquire_ctx; 448 + drm_modeset_acquire_init(&ctx, 0); 449 449 450 450 restart: 451 451 /* ··· 458 458 459 459 fb = set->fb; 460 460 461 - ret = crtc->funcs->set_config(set, ctx); 461 + ret = crtc->funcs->set_config(set, &ctx); 462 462 if (ret == 0) { 463 463 crtc->primary->crtc = crtc; 464 464 crtc->primary->fb = fb; ··· 473 473 } 474 474 475 475 if (ret == -EDEADLK) { 476 - dev->mode_config.acquire_ctx = NULL; 477 - 478 - retry_locking: 479 - drm_modeset_backoff(ctx); 480 - 481 - ret = drm_modeset_lock_all_ctx(dev, ctx); 482 - if (ret) 483 - goto retry_locking; 484 - 485 - dev->mode_config.acquire_ctx = ctx; 486 - 476 + drm_modeset_backoff(&ctx); 487 477 goto restart; 488 478 } 479 + 480 + drm_modeset_drop_locks(&ctx); 481 + drm_modeset_acquire_fini(&ctx); 489 482 490 483 return ret; 491 484 } ··· 617 624 } 618 625 619 626 mutex_lock(&par->bo_mutex); 620 - drm_modeset_lock_all(vmw_priv->dev); 621 627 ret = vmw_fb_kms_framebuffer(info); 622 628 if (ret) 623 629 goto out_unlock; ··· 649 657 drm_mode_destroy(vmw_priv->dev, old_mode); 650 658 par->set_mode = mode; 651 659 652 - drm_modeset_unlock_all(vmw_priv->dev); 653 660 mutex_unlock(&par->bo_mutex); 654 661 655 662 return ret; ··· 704 713 par->max_width = fb_width; 705 714 par->max_height = fb_height; 706 715 707 - drm_modeset_lock_all(vmw_priv->dev); 708 716 ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width, 709 717 par->max_height, &par->con, 710 718 &par->crtc, &init_mode); 711 - if (ret) { 712 - drm_modeset_unlock_all(vmw_priv->dev); 719 + if (ret) 713 720 goto err_kms; 714 - } 715 721 716 722 info->var.xres = init_mode->hdisplay; 717 723 info->var.yres = init_mode->vdisplay; 718 - 
drm_modeset_unlock_all(vmw_priv->dev); 719 724 720 725 /* 721 726 * Create buffers and alloc memory ··· 819 832 cancel_delayed_work_sync(&par->local_work); 820 833 unregister_framebuffer(info); 821 834 835 + mutex_lock(&par->bo_mutex); 822 836 (void) vmw_fb_kms_detach(par, true, true); 837 + mutex_unlock(&par->bo_mutex); 823 838 824 839 vfree(par->vmalloc); 825 840 framebuffer_release(info);
+11 -3
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
··· 2595 2595 vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf, 2596 2596 out_fence, NULL); 2597 2597 2598 + vmw_dmabuf_unreference(&ctx->buf); 2598 2599 vmw_resource_unreserve(res, false, NULL, 0); 2599 2600 mutex_unlock(&res->dev_priv->cmdbuf_mutex); 2600 2601 } ··· 2681 2680 struct vmw_display_unit *du; 2682 2681 struct drm_display_mode *mode; 2683 2682 int i = 0; 2683 + int ret = 0; 2684 2684 2685 + mutex_lock(&dev_priv->dev->mode_config.mutex); 2685 2686 list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list, 2686 2687 head) { 2687 2688 if (i == unit) ··· 2694 2691 2695 2692 if (i != unit) { 2696 2693 DRM_ERROR("Could not find initial display unit.\n"); 2697 - return -EINVAL; 2694 + ret = -EINVAL; 2695 + goto out_unlock; 2698 2696 } 2699 2697 2700 2698 if (list_empty(&con->modes)) ··· 2703 2699 2704 2700 if (list_empty(&con->modes)) { 2705 2701 DRM_ERROR("Could not find initial display mode.\n"); 2706 - return -EINVAL; 2702 + ret = -EINVAL; 2703 + goto out_unlock; 2707 2704 } 2708 2705 2709 2706 du = vmw_connector_to_du(con); ··· 2725 2720 head); 2726 2721 } 2727 2722 2728 - return 0; 2723 + out_unlock: 2724 + mutex_unlock(&dev_priv->dev->mode_config.mutex); 2725 + 2726 + return ret; 2729 2727 } 2730 2728 2731 2729 /**
+240
include/uapi/drm/exynos_drm.h
··· 135 135 __u64 async; 136 136 }; 137 137 138 + /* Exynos DRM IPP v2 API */ 139 + 140 + /** 141 + * Enumerate available IPP hardware modules. 142 + * 143 + * @count_ipps: size of ipp_id array / number of ipp modules (set by driver) 144 + * @reserved: padding 145 + * @ipp_id_ptr: pointer to ipp_id array or NULL 146 + */ 147 + struct drm_exynos_ioctl_ipp_get_res { 148 + __u32 count_ipps; 149 + __u32 reserved; 150 + __u64 ipp_id_ptr; 151 + }; 152 + 153 + enum drm_exynos_ipp_format_type { 154 + DRM_EXYNOS_IPP_FORMAT_SOURCE = 0x01, 155 + DRM_EXYNOS_IPP_FORMAT_DESTINATION = 0x02, 156 + }; 157 + 158 + struct drm_exynos_ipp_format { 159 + __u32 fourcc; 160 + __u32 type; 161 + __u64 modifier; 162 + }; 163 + 164 + enum drm_exynos_ipp_capability { 165 + DRM_EXYNOS_IPP_CAP_CROP = 0x01, 166 + DRM_EXYNOS_IPP_CAP_ROTATE = 0x02, 167 + DRM_EXYNOS_IPP_CAP_SCALE = 0x04, 168 + DRM_EXYNOS_IPP_CAP_CONVERT = 0x08, 169 + }; 170 + 171 + /** 172 + * Get IPP hardware capabilities and supported image formats. 173 + * 174 + * @ipp_id: id of IPP module to query 175 + * @capabilities: bitmask of drm_exynos_ipp_capability (set by driver) 176 + * @reserved: padding 177 + * @formats_count: size of formats array (in entries) / number of filled 178 + * formats (set by driver) 179 + * @formats_ptr: pointer to formats array or NULL 180 + */ 181 + struct drm_exynos_ioctl_ipp_get_caps { 182 + __u32 ipp_id; 183 + __u32 capabilities; 184 + __u32 reserved; 185 + __u32 formats_count; 186 + __u64 formats_ptr; 187 + }; 188 + 189 + enum drm_exynos_ipp_limit_type { 190 + /* size (horizontal/vertial) limits, in pixels (min, max, alignment) */ 191 + DRM_EXYNOS_IPP_LIMIT_TYPE_SIZE = 0x0001, 192 + /* scale ratio (horizonta/vertial), 16.16 fixed point (min, max) */ 193 + DRM_EXYNOS_IPP_LIMIT_TYPE_SCALE = 0x0002, 194 + 195 + /* image buffer area */ 196 + DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER = 0x0001 << 16, 197 + /* src/dst rectangle area */ 198 + DRM_EXYNOS_IPP_LIMIT_SIZE_AREA = 0x0002 << 16, 199 + /* src/dst rectangle 
area when rotation enabled */ 200 + DRM_EXYNOS_IPP_LIMIT_SIZE_ROTATED = 0x0003 << 16, 201 + 202 + DRM_EXYNOS_IPP_LIMIT_TYPE_MASK = 0x000f, 203 + DRM_EXYNOS_IPP_LIMIT_SIZE_MASK = 0x000f << 16, 204 + }; 205 + 206 + struct drm_exynos_ipp_limit_val { 207 + __u32 min; 208 + __u32 max; 209 + __u32 align; 210 + __u32 reserved; 211 + }; 212 + 213 + /** 214 + * IPP module limitation. 215 + * 216 + * @type: limit type (see drm_exynos_ipp_limit_type enum) 217 + * @reserved: padding 218 + * @h: horizontal limits 219 + * @v: vertical limits 220 + */ 221 + struct drm_exynos_ipp_limit { 222 + __u32 type; 223 + __u32 reserved; 224 + struct drm_exynos_ipp_limit_val h; 225 + struct drm_exynos_ipp_limit_val v; 226 + }; 227 + 228 + /** 229 + * Get IPP limits for given image format. 230 + * 231 + * @ipp_id: id of IPP module to query 232 + * @fourcc: image format code (see DRM_FORMAT_* in drm_fourcc.h) 233 + * @modifier: image format modifier (see DRM_FORMAT_MOD_* in drm_fourcc.h) 234 + * @type: source/destination identifier (drm_exynos_ipp_format_flag enum) 235 + * @limits_count: size of limits array (in entries) / number of filled entries 236 + * (set by driver) 237 + * @limits_ptr: pointer to limits array or NULL 238 + */ 239 + struct drm_exynos_ioctl_ipp_get_limits { 240 + __u32 ipp_id; 241 + __u32 fourcc; 242 + __u64 modifier; 243 + __u32 type; 244 + __u32 limits_count; 245 + __u64 limits_ptr; 246 + }; 247 + 248 + enum drm_exynos_ipp_task_id { 249 + /* buffer described by struct drm_exynos_ipp_task_buffer */ 250 + DRM_EXYNOS_IPP_TASK_BUFFER = 0x0001, 251 + /* rectangle described by struct drm_exynos_ipp_task_rect */ 252 + DRM_EXYNOS_IPP_TASK_RECTANGLE = 0x0002, 253 + /* transformation described by struct drm_exynos_ipp_task_transform */ 254 + DRM_EXYNOS_IPP_TASK_TRANSFORM = 0x0003, 255 + /* alpha configuration described by struct drm_exynos_ipp_task_alpha */ 256 + DRM_EXYNOS_IPP_TASK_ALPHA = 0x0004, 257 + 258 + /* source image data (for buffer and rectangle chunks) */ 259 + 
DRM_EXYNOS_IPP_TASK_TYPE_SOURCE = 0x0001 << 16, 260 + /* destination image data (for buffer and rectangle chunks) */ 261 + DRM_EXYNOS_IPP_TASK_TYPE_DESTINATION = 0x0002 << 16, 262 + }; 263 + 264 + /** 265 + * Memory buffer with image data. 266 + * 267 + * @id: must be DRM_EXYNOS_IPP_TASK_BUFFER 268 + * other parameters are same as for AddFB2 generic DRM ioctl 269 + */ 270 + struct drm_exynos_ipp_task_buffer { 271 + __u32 id; 272 + __u32 fourcc; 273 + __u32 width, height; 274 + __u32 gem_id[4]; 275 + __u32 offset[4]; 276 + __u32 pitch[4]; 277 + __u64 modifier; 278 + }; 279 + 280 + /** 281 + * Rectangle for processing. 282 + * 283 + * @id: must be DRM_EXYNOS_IPP_TASK_RECTANGLE 284 + * @reserved: padding 285 + * @x,@y: left corner in pixels 286 + * @w,@h: width/height in pixels 287 + */ 288 + struct drm_exynos_ipp_task_rect { 289 + __u32 id; 290 + __u32 reserved; 291 + __u32 x; 292 + __u32 y; 293 + __u32 w; 294 + __u32 h; 295 + }; 296 + 297 + /** 298 + * Image tranformation description. 299 + * 300 + * @id: must be DRM_EXYNOS_IPP_TASK_TRANSFORM 301 + * @rotation: DRM_MODE_ROTATE_* and DRM_MODE_REFLECT_* values 302 + */ 303 + struct drm_exynos_ipp_task_transform { 304 + __u32 id; 305 + __u32 rotation; 306 + }; 307 + 308 + /** 309 + * Image global alpha configuration for formats without alpha values. 
310 + * 311 + * @id: must be DRM_EXYNOS_IPP_TASK_ALPHA 312 + * @value: global alpha value (0-255) 313 + */ 314 + struct drm_exynos_ipp_task_alpha { 315 + __u32 id; 316 + __u32 value; 317 + }; 318 + 319 + enum drm_exynos_ipp_flag { 320 + /* generate DRM event after processing */ 321 + DRM_EXYNOS_IPP_FLAG_EVENT = 0x01, 322 + /* dry run, only check task parameters */ 323 + DRM_EXYNOS_IPP_FLAG_TEST_ONLY = 0x02, 324 + /* non-blocking processing */ 325 + DRM_EXYNOS_IPP_FLAG_NONBLOCK = 0x04, 326 + }; 327 + 328 + #define DRM_EXYNOS_IPP_FLAGS (DRM_EXYNOS_IPP_FLAG_EVENT |\ 329 + DRM_EXYNOS_IPP_FLAG_TEST_ONLY | DRM_EXYNOS_IPP_FLAG_NONBLOCK) 330 + 331 + /** 332 + * Perform image processing described by array of drm_exynos_ipp_task_* 333 + * structures (parameters array). 334 + * 335 + * @ipp_id: id of IPP module to run the task 336 + * @flags: bitmask of drm_exynos_ipp_flag values 337 + * @reserved: padding 338 + * @params_size: size of parameters array (in bytes) 339 + * @params_ptr: pointer to parameters array or NULL 340 + * @user_data: (optional) data for drm event 341 + */ 342 + struct drm_exynos_ioctl_ipp_commit { 343 + __u32 ipp_id; 344 + __u32 flags; 345 + __u32 reserved; 346 + __u32 params_size; 347 + __u64 params_ptr; 348 + __u64 user_data; 349 + }; 350 + 138 351 #define DRM_EXYNOS_GEM_CREATE 0x00 139 352 #define DRM_EXYNOS_GEM_MAP 0x01 140 353 /* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */ ··· 360 147 #define DRM_EXYNOS_G2D_EXEC 0x22 361 148 362 149 /* Reserved 0x30 ~ 0x33 for obsolete Exynos IPP ioctls */ 150 + /* IPP - Image Post Processing */ 151 + #define DRM_EXYNOS_IPP_GET_RESOURCES 0x40 152 + #define DRM_EXYNOS_IPP_GET_CAPS 0x41 153 + #define DRM_EXYNOS_IPP_GET_LIMITS 0x42 154 + #define DRM_EXYNOS_IPP_COMMIT 0x43 363 155 364 156 #define DRM_IOCTL_EXYNOS_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \ 365 157 DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create) ··· 383 165 #define DRM_IOCTL_EXYNOS_G2D_EXEC DRM_IOWR(DRM_COMMAND_BASE + \ 384 166 
DRM_EXYNOS_G2D_EXEC, struct drm_exynos_g2d_exec) 385 167 168 + #define DRM_IOCTL_EXYNOS_IPP_GET_RESOURCES DRM_IOWR(DRM_COMMAND_BASE + \ 169 + DRM_EXYNOS_IPP_GET_RESOURCES, \ 170 + struct drm_exynos_ioctl_ipp_get_res) 171 + #define DRM_IOCTL_EXYNOS_IPP_GET_CAPS DRM_IOWR(DRM_COMMAND_BASE + \ 172 + DRM_EXYNOS_IPP_GET_CAPS, struct drm_exynos_ioctl_ipp_get_caps) 173 + #define DRM_IOCTL_EXYNOS_IPP_GET_LIMITS DRM_IOWR(DRM_COMMAND_BASE + \ 174 + DRM_EXYNOS_IPP_GET_LIMITS, \ 175 + struct drm_exynos_ioctl_ipp_get_limits) 176 + #define DRM_IOCTL_EXYNOS_IPP_COMMIT DRM_IOWR(DRM_COMMAND_BASE + \ 177 + DRM_EXYNOS_IPP_COMMIT, struct drm_exynos_ioctl_ipp_commit) 178 + 386 179 /* EXYNOS specific events */ 387 180 #define DRM_EXYNOS_G2D_EVENT 0x80000000 181 + #define DRM_EXYNOS_IPP_EVENT 0x80000002 388 182 389 183 struct drm_exynos_g2d_event { 390 184 struct drm_event base; ··· 405 175 __u32 tv_usec; 406 176 __u32 cmdlist_no; 407 177 __u32 reserved; 178 + }; 179 + 180 + struct drm_exynos_ipp_event { 181 + struct drm_event base; 182 + __u64 user_data; 183 + __u32 tv_sec; 184 + __u32 tv_usec; 185 + __u32 ipp_id; 186 + __u32 sequence; 187 + __u64 reserved; 408 188 }; 409 189 410 190 #if defined(__cplusplus)