Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/msm: Add initial eDP support in msm drm driver (v5)

This change adds a new eDP connector in msm drm driver. With this
change, eDP panel can work with msm platform under drm framework.

v1: Initial change

v2: Address Rob's comments
Use generated header file for register definitions
Change to devm_* APIs

v3: Address Thierry's comments and rebase on top of atomic changes
Remove edp_bridge_mode_fixup
Remove backlight control code and rely on pwm-backlight
Remove continuous splash screen support for now
Change to gpiod_* APIs

v4: Fix kbuild test issue

Signed-off-by: Hai Li <hali@codeaurora.org>
[robclark: v5: rebase on drm_bridge changes in drm-next]
Signed-off-by: Rob Clark <robdclark@gmail.com>

Authored by Hai Li and committed by Rob Clark (commit ab5b0107, parent b1b1c74e).

+2350
+6
drivers/gpu/drm/msm/Makefile
··· 16 16 hdmi/hdmi_phy_8960.o \ 17 17 hdmi/hdmi_phy_8x60.o \ 18 18 hdmi/hdmi_phy_8x74.o \ 19 + edp/edp.o \ 20 + edp/edp_aux.o \ 21 + edp/edp_bridge.o \ 22 + edp/edp_connector.o \ 23 + edp/edp_ctrl.o \ 24 + edp/edp_phy.o \ 19 25 mdp/mdp_format.o \ 20 26 mdp/mdp_kms.o \ 21 27 mdp/mdp4/mdp4_crtc.o \
+208
drivers/gpu/drm/msm/edp/edp.c
··· 1 + /* 2 + * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 and 6 + * only version 2 as published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope that it will be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + */ 13 + 14 + #include <linux/of_irq.h> 15 + #include "edp.h" 16 + 17 + static irqreturn_t edp_irq(int irq, void *dev_id) 18 + { 19 + struct msm_edp *edp = dev_id; 20 + 21 + /* Process eDP irq */ 22 + return msm_edp_ctrl_irq(edp->ctrl); 23 + } 24 + 25 + static void edp_destroy(struct platform_device *pdev) 26 + { 27 + struct msm_edp *edp = platform_get_drvdata(pdev); 28 + 29 + if (!edp) 30 + return; 31 + 32 + if (edp->ctrl) { 33 + msm_edp_ctrl_destroy(edp->ctrl); 34 + edp->ctrl = NULL; 35 + } 36 + 37 + platform_set_drvdata(pdev, NULL); 38 + } 39 + 40 + /* construct eDP at bind/probe time, grab all the resources. 
*/ 41 + static struct msm_edp *edp_init(struct platform_device *pdev) 42 + { 43 + struct msm_edp *edp = NULL; 44 + int ret; 45 + 46 + if (!pdev) { 47 + pr_err("no eDP device\n"); 48 + ret = -ENXIO; 49 + goto fail; 50 + } 51 + 52 + edp = devm_kzalloc(&pdev->dev, sizeof(*edp), GFP_KERNEL); 53 + if (!edp) { 54 + ret = -ENOMEM; 55 + goto fail; 56 + } 57 + DBG("eDP probed=%p", edp); 58 + 59 + edp->pdev = pdev; 60 + platform_set_drvdata(pdev, edp); 61 + 62 + ret = msm_edp_ctrl_init(edp); 63 + if (ret) 64 + goto fail; 65 + 66 + return edp; 67 + 68 + fail: 69 + if (edp) 70 + edp_destroy(pdev); 71 + 72 + return ERR_PTR(ret); 73 + } 74 + 75 + static int edp_bind(struct device *dev, struct device *master, void *data) 76 + { 77 + struct drm_device *drm = dev_get_drvdata(master); 78 + struct msm_drm_private *priv = drm->dev_private; 79 + struct msm_edp *edp; 80 + 81 + DBG(""); 82 + edp = edp_init(to_platform_device(dev)); 83 + if (IS_ERR(edp)) 84 + return PTR_ERR(edp); 85 + priv->edp = edp; 86 + 87 + return 0; 88 + } 89 + 90 + static void edp_unbind(struct device *dev, struct device *master, void *data) 91 + { 92 + struct drm_device *drm = dev_get_drvdata(master); 93 + struct msm_drm_private *priv = drm->dev_private; 94 + 95 + DBG(""); 96 + if (priv->edp) { 97 + edp_destroy(to_platform_device(dev)); 98 + priv->edp = NULL; 99 + } 100 + } 101 + 102 + static const struct component_ops edp_ops = { 103 + .bind = edp_bind, 104 + .unbind = edp_unbind, 105 + }; 106 + 107 + static int edp_dev_probe(struct platform_device *pdev) 108 + { 109 + DBG(""); 110 + return component_add(&pdev->dev, &edp_ops); 111 + } 112 + 113 + static int edp_dev_remove(struct platform_device *pdev) 114 + { 115 + DBG(""); 116 + component_del(&pdev->dev, &edp_ops); 117 + return 0; 118 + } 119 + 120 + static const struct of_device_id dt_match[] = { 121 + { .compatible = "qcom,mdss-edp" }, 122 + {} 123 + }; 124 + 125 + static struct platform_driver edp_driver = { 126 + .probe = edp_dev_probe, 127 + .remove = 
edp_dev_remove, 128 + .driver = { 129 + .name = "msm_edp", 130 + .of_match_table = dt_match, 131 + }, 132 + }; 133 + 134 + void __init msm_edp_register(void) 135 + { 136 + DBG(""); 137 + platform_driver_register(&edp_driver); 138 + } 139 + 140 + void __exit msm_edp_unregister(void) 141 + { 142 + DBG(""); 143 + platform_driver_unregister(&edp_driver); 144 + } 145 + 146 + /* Second part of initialization, the drm/kms level modeset_init */ 147 + int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev, 148 + struct drm_encoder *encoder) 149 + { 150 + struct platform_device *pdev = edp->pdev; 151 + struct msm_drm_private *priv = dev->dev_private; 152 + int ret; 153 + 154 + edp->encoder = encoder; 155 + edp->dev = dev; 156 + 157 + edp->bridge = msm_edp_bridge_init(edp); 158 + if (IS_ERR(edp->bridge)) { 159 + ret = PTR_ERR(edp->bridge); 160 + dev_err(dev->dev, "failed to create eDP bridge: %d\n", ret); 161 + edp->bridge = NULL; 162 + goto fail; 163 + } 164 + 165 + edp->connector = msm_edp_connector_init(edp); 166 + if (IS_ERR(edp->connector)) { 167 + ret = PTR_ERR(edp->connector); 168 + dev_err(dev->dev, "failed to create eDP connector: %d\n", ret); 169 + edp->connector = NULL; 170 + goto fail; 171 + } 172 + 173 + edp->irq = irq_of_parse_and_map(pdev->dev.of_node, 0); 174 + if (edp->irq < 0) { 175 + ret = edp->irq; 176 + dev_err(dev->dev, "failed to get IRQ: %d\n", ret); 177 + goto fail; 178 + } 179 + 180 + ret = devm_request_irq(&pdev->dev, edp->irq, 181 + edp_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, 182 + "edp_isr", edp); 183 + if (ret < 0) { 184 + dev_err(dev->dev, "failed to request IRQ%u: %d\n", 185 + edp->irq, ret); 186 + goto fail; 187 + } 188 + 189 + encoder->bridge = edp->bridge; 190 + 191 + priv->bridges[priv->num_bridges++] = edp->bridge; 192 + priv->connectors[priv->num_connectors++] = edp->connector; 193 + 194 + return 0; 195 + 196 + fail: 197 + /* bridge/connector are normally destroyed by drm */ 198 + if (edp->bridge) { 199 + 
edp_bridge_destroy(edp->bridge); 200 + edp->bridge = NULL; 201 + } 202 + if (edp->connector) { 203 + edp->connector->funcs->destroy(edp->connector); 204 + edp->connector = NULL; 205 + } 206 + 207 + return ret; 208 + }
+85
drivers/gpu/drm/msm/edp/edp.h
/*
 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef __EDP_CONNECTOR_H__
#define __EDP_CONNECTOR_H__

#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

#include "drm_crtc.h"
#include "drm_dp_helper.h"
#include "msm_drv.h"

/* MMIO accessors for the eDP register block (thin msm_readl/writel wrappers) */
#define edp_read(offset) msm_readl((offset))
#define edp_write(offset, data) msm_writel((data), (offset))

/* Opaque sub-module handles; definitions live in the respective .c files. */
struct edp_ctrl;
struct edp_aux;
struct edp_phy;

/* Top-level state for one eDP block, shared by all eDP sub-modules. */
struct msm_edp {
	struct drm_device *dev;
	struct platform_device *pdev;

	struct drm_connector *connector;
	struct drm_bridge *bridge;

	/* the encoder we are hooked to (outside of eDP block) */
	struct drm_encoder *encoder;

	struct edp_ctrl *ctrl;

	/* mapped interrupt number (from irq_of_parse_and_map) */
	int irq;
};

/* eDP bridge */
struct drm_bridge *msm_edp_bridge_init(struct msm_edp *edp);
void edp_bridge_destroy(struct drm_bridge *bridge);

/* eDP connector */
struct drm_connector *msm_edp_connector_init(struct msm_edp *edp);

/* AUX */
void *msm_edp_aux_init(struct device *dev, void __iomem *regbase,
		struct drm_dp_aux **drm_aux);
void msm_edp_aux_destroy(struct device *dev, struct edp_aux *aux);
irqreturn_t msm_edp_aux_irq(struct edp_aux *aux, u32 isr);
void msm_edp_aux_ctrl(struct edp_aux *aux, int enable);

/* Phy */
bool msm_edp_phy_ready(struct edp_phy *phy);
void msm_edp_phy_ctrl(struct edp_phy *phy, int enable);
void msm_edp_phy_vm_pe_init(struct edp_phy *phy);
void msm_edp_phy_vm_pe_cfg(struct edp_phy *phy, u32 v0, u32 v1);
void msm_edp_phy_lane_power_ctrl(struct edp_phy *phy, bool up, u32 max_lane);
void *msm_edp_phy_init(struct device *dev, void __iomem *regbase);

/* Ctrl */
irqreturn_t msm_edp_ctrl_irq(struct edp_ctrl *ctrl);
void msm_edp_ctrl_power(struct edp_ctrl *ctrl, bool on);
int msm_edp_ctrl_init(struct msm_edp *edp);
void msm_edp_ctrl_destroy(struct edp_ctrl *ctrl);
bool msm_edp_ctrl_panel_connected(struct edp_ctrl *ctrl);
int msm_edp_ctrl_get_panel_info(struct edp_ctrl *ctrl,
		struct drm_connector *connector, struct edid **edid);
int msm_edp_ctrl_timing_cfg(struct edp_ctrl *ctrl,
		const struct drm_display_mode *mode,
		const struct drm_display_info *info);
/* @pixel_rate is in kHz */
bool msm_edp_ctrl_pixel_clock_valid(struct edp_ctrl *ctrl,
		u32 pixel_rate, u32 *pm, u32 *pn);

#endif /* __EDP_CONNECTOR_H__ */
+268
drivers/gpu/drm/msm/edp/edp_aux.c
··· 1 + /* 2 + * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 and 6 + * only version 2 as published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope that it will be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + */ 13 + 14 + #include "edp.h" 15 + #include "edp.xml.h" 16 + 17 + #define AUX_CMD_FIFO_LEN 144 18 + #define AUX_CMD_NATIVE_MAX 16 19 + #define AUX_CMD_I2C_MAX 128 20 + 21 + #define EDP_INTR_AUX_I2C_ERR \ 22 + (EDP_INTERRUPT_REG_1_WRONG_ADDR | EDP_INTERRUPT_REG_1_TIMEOUT | \ 23 + EDP_INTERRUPT_REG_1_NACK_DEFER | EDP_INTERRUPT_REG_1_WRONG_DATA_CNT | \ 24 + EDP_INTERRUPT_REG_1_I2C_NACK | EDP_INTERRUPT_REG_1_I2C_DEFER) 25 + #define EDP_INTR_TRANS_STATUS \ 26 + (EDP_INTERRUPT_REG_1_AUX_I2C_DONE | EDP_INTR_AUX_I2C_ERR) 27 + 28 + struct edp_aux { 29 + void __iomem *base; 30 + bool msg_err; 31 + 32 + struct completion msg_comp; 33 + 34 + /* To prevent the message transaction routine from reentry. 
*/ 35 + struct mutex msg_mutex; 36 + 37 + struct drm_dp_aux drm_aux; 38 + }; 39 + #define to_edp_aux(x) container_of(x, struct edp_aux, drm_aux) 40 + 41 + static int edp_msg_fifo_tx(struct edp_aux *aux, struct drm_dp_aux_msg *msg) 42 + { 43 + u32 data[4]; 44 + u32 reg, len; 45 + bool native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ); 46 + bool read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ); 47 + u8 *msgdata = msg->buffer; 48 + int i; 49 + 50 + if (read) 51 + len = 4; 52 + else 53 + len = msg->size + 4; 54 + 55 + /* 56 + * cmd fifo only has depth of 144 bytes 57 + */ 58 + if (len > AUX_CMD_FIFO_LEN) 59 + return -EINVAL; 60 + 61 + /* Pack cmd and write to HW */ 62 + data[0] = (msg->address >> 16) & 0xf; /* addr[19:16] */ 63 + if (read) 64 + data[0] |= BIT(4); /* R/W */ 65 + 66 + data[1] = (msg->address >> 8) & 0xff; /* addr[15:8] */ 67 + data[2] = msg->address & 0xff; /* addr[7:0] */ 68 + data[3] = (msg->size - 1) & 0xff; /* len[7:0] */ 69 + 70 + for (i = 0; i < len; i++) { 71 + reg = (i < 4) ? 
data[i] : msgdata[i - 4]; 72 + reg = EDP_AUX_DATA_DATA(reg); /* index = 0, write */ 73 + if (i == 0) 74 + reg |= EDP_AUX_DATA_INDEX_WRITE; 75 + edp_write(aux->base + REG_EDP_AUX_DATA, reg); 76 + } 77 + 78 + reg = 0; /* Transaction number is always 1 */ 79 + if (!native) /* i2c */ 80 + reg |= EDP_AUX_TRANS_CTRL_I2C; 81 + 82 + reg |= EDP_AUX_TRANS_CTRL_GO; 83 + edp_write(aux->base + REG_EDP_AUX_TRANS_CTRL, reg); 84 + 85 + return 0; 86 + } 87 + 88 + static int edp_msg_fifo_rx(struct edp_aux *aux, struct drm_dp_aux_msg *msg) 89 + { 90 + u32 data; 91 + u8 *dp; 92 + int i; 93 + u32 len = msg->size; 94 + 95 + edp_write(aux->base + REG_EDP_AUX_DATA, 96 + EDP_AUX_DATA_INDEX_WRITE | EDP_AUX_DATA_READ); /* index = 0 */ 97 + 98 + dp = msg->buffer; 99 + 100 + /* discard first byte */ 101 + data = edp_read(aux->base + REG_EDP_AUX_DATA); 102 + for (i = 0; i < len; i++) { 103 + data = edp_read(aux->base + REG_EDP_AUX_DATA); 104 + dp[i] = (u8)((data >> 8) & 0xff); 105 + } 106 + 107 + return 0; 108 + } 109 + 110 + /* 111 + * This function does the real job to process an AUX transaction. 112 + * It will call msm_edp_aux_ctrl() function to reset the AUX channel, 113 + * if the waiting is timeout. 114 + * The caller who triggers the transaction should avoid the 115 + * msm_edp_aux_ctrl() running concurrently in other threads, i.e. 116 + * start transaction only when AUX channel is fully enabled. 117 + */ 118 + ssize_t edp_aux_transfer(struct drm_dp_aux *drm_aux, struct drm_dp_aux_msg *msg) 119 + { 120 + struct edp_aux *aux = to_edp_aux(drm_aux); 121 + ssize_t ret; 122 + bool native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ); 123 + bool read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ); 124 + 125 + /* Ignore address only message */ 126 + if ((msg->size == 0) || (msg->buffer == NULL)) { 127 + msg->reply = native ? 
128 + DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK; 129 + return msg->size; 130 + } 131 + 132 + /* msg sanity check */ 133 + if ((native && (msg->size > AUX_CMD_NATIVE_MAX)) || 134 + (msg->size > AUX_CMD_I2C_MAX)) { 135 + pr_err("%s: invalid msg: size(%d), request(%x)\n", 136 + __func__, msg->size, msg->request); 137 + return -EINVAL; 138 + } 139 + 140 + mutex_lock(&aux->msg_mutex); 141 + 142 + aux->msg_err = false; 143 + reinit_completion(&aux->msg_comp); 144 + 145 + ret = edp_msg_fifo_tx(aux, msg); 146 + if (ret < 0) 147 + goto unlock_exit; 148 + 149 + DBG("wait_for_completion"); 150 + ret = wait_for_completion_timeout(&aux->msg_comp, 300); 151 + if (ret <= 0) { 152 + /* 153 + * Clear GO and reset AUX channel 154 + * to cancel the current transaction. 155 + */ 156 + edp_write(aux->base + REG_EDP_AUX_TRANS_CTRL, 0); 157 + msm_edp_aux_ctrl(aux, 1); 158 + pr_err("%s: aux timeout, %d\n", __func__, ret); 159 + goto unlock_exit; 160 + } 161 + DBG("completion"); 162 + 163 + if (!aux->msg_err) { 164 + if (read) { 165 + ret = edp_msg_fifo_rx(aux, msg); 166 + if (ret < 0) 167 + goto unlock_exit; 168 + } 169 + 170 + msg->reply = native ? 171 + DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK; 172 + } else { 173 + /* Reply defer to retry */ 174 + msg->reply = native ? 175 + DP_AUX_NATIVE_REPLY_DEFER : DP_AUX_I2C_REPLY_DEFER; 176 + /* 177 + * The sleep time in caller is not long enough to make sure 178 + * our H/W completes transactions. Add more defer time here. 
179 + */ 180 + msleep(100); 181 + } 182 + 183 + /* Return requested size for success or retry */ 184 + ret = msg->size; 185 + 186 + unlock_exit: 187 + mutex_unlock(&aux->msg_mutex); 188 + return ret; 189 + } 190 + 191 + void *msm_edp_aux_init(struct device *dev, void __iomem *regbase, 192 + struct drm_dp_aux **drm_aux) 193 + { 194 + struct edp_aux *aux = NULL; 195 + int ret; 196 + 197 + DBG(""); 198 + aux = devm_kzalloc(dev, sizeof(*aux), GFP_KERNEL); 199 + if (!aux) 200 + return NULL; 201 + 202 + aux->base = regbase; 203 + mutex_init(&aux->msg_mutex); 204 + init_completion(&aux->msg_comp); 205 + 206 + aux->drm_aux.name = "msm_edp_aux"; 207 + aux->drm_aux.dev = dev; 208 + aux->drm_aux.transfer = edp_aux_transfer; 209 + ret = drm_dp_aux_register(&aux->drm_aux); 210 + if (ret) { 211 + pr_err("%s: failed to register drm aux: %d\n", __func__, ret); 212 + mutex_destroy(&aux->msg_mutex); 213 + } 214 + 215 + if (drm_aux && aux) 216 + *drm_aux = &aux->drm_aux; 217 + 218 + return aux; 219 + } 220 + 221 + void msm_edp_aux_destroy(struct device *dev, struct edp_aux *aux) 222 + { 223 + if (aux) { 224 + drm_dp_aux_unregister(&aux->drm_aux); 225 + mutex_destroy(&aux->msg_mutex); 226 + } 227 + } 228 + 229 + irqreturn_t msm_edp_aux_irq(struct edp_aux *aux, u32 isr) 230 + { 231 + if (isr & EDP_INTR_TRANS_STATUS) { 232 + DBG("isr=%x", isr); 233 + edp_write(aux->base + REG_EDP_AUX_TRANS_CTRL, 0); 234 + 235 + if (isr & EDP_INTR_AUX_I2C_ERR) 236 + aux->msg_err = true; 237 + else 238 + aux->msg_err = false; 239 + 240 + complete(&aux->msg_comp); 241 + } 242 + 243 + return IRQ_HANDLED; 244 + } 245 + 246 + void msm_edp_aux_ctrl(struct edp_aux *aux, int enable) 247 + { 248 + u32 data; 249 + 250 + DBG("enable=%d", enable); 251 + data = edp_read(aux->base + REG_EDP_AUX_CTRL); 252 + 253 + if (enable) { 254 + data |= EDP_AUX_CTRL_RESET; 255 + edp_write(aux->base + REG_EDP_AUX_CTRL, data); 256 + /* Make sure full reset */ 257 + wmb(); 258 + usleep_range(500, 1000); 259 + 260 + data &= 
~EDP_AUX_CTRL_RESET; 261 + data |= EDP_AUX_CTRL_ENABLE; 262 + edp_write(aux->base + REG_EDP_AUX_CTRL, data); 263 + } else { 264 + data &= ~EDP_AUX_CTRL_ENABLE; 265 + edp_write(aux->base + REG_EDP_AUX_CTRL, data); 266 + } 267 + } 268 +
+120
drivers/gpu/drm/msm/edp/edp_bridge.c
/*
 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "edp.h"

/* drm_bridge wrapper around the msm_edp block */
struct edp_bridge {
	struct drm_bridge base;
	struct msm_edp *edp;
};
#define to_edp_bridge(x) container_of(x, struct edp_bridge, base)

/* Nothing to free: the bridge is devm-allocated against the drm device. */
void edp_bridge_destroy(struct drm_bridge *bridge)
{
}

/* Power the eDP controller up before the encoder is enabled. */
static void edp_bridge_pre_enable(struct drm_bridge *bridge)
{
	struct edp_bridge *edp_bridge = to_edp_bridge(bridge);
	struct msm_edp *edp = edp_bridge->edp;

	DBG("");
	msm_edp_ctrl_power(edp->ctrl, true);
}

static void edp_bridge_enable(struct drm_bridge *bridge)
{
	DBG("");
}

static void edp_bridge_disable(struct drm_bridge *bridge)
{
	DBG("");
}

/* Power the eDP controller down after the encoder is disabled. */
static void edp_bridge_post_disable(struct drm_bridge *bridge)
{
	struct edp_bridge *edp_bridge = to_edp_bridge(bridge);
	struct msm_edp *edp = edp_bridge->edp;

	DBG("");
	msm_edp_ctrl_power(edp->ctrl, false);
}

/*
 * Program link/pixel timing for the adjusted mode.  The connector list is
 * scanned to find the connector currently routed through this bridge so its
 * display_info can be handed to the controller.
 */
static void edp_bridge_mode_set(struct drm_bridge *bridge,
		struct drm_display_mode *mode,
		struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = bridge->dev;
	struct drm_connector *connector;
	struct edp_bridge *edp_bridge = to_edp_bridge(bridge);
	struct msm_edp *edp = edp_bridge->edp;

	DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
			mode->base.id, mode->name,
			mode->vrefresh, mode->clock,
			mode->hdisplay, mode->hsync_start,
			mode->hsync_end, mode->htotal,
			mode->vdisplay, mode->vsync_start,
			mode->vsync_end, mode->vtotal,
			mode->type, mode->flags);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		if ((connector->encoder != NULL) &&
			(connector->encoder->bridge == bridge)) {
			msm_edp_ctrl_timing_cfg(edp->ctrl,
				adjusted_mode, &connector->display_info);
			break;
		}
	}
}

static const struct drm_bridge_funcs edp_bridge_funcs = {
	.pre_enable = edp_bridge_pre_enable,
	.enable = edp_bridge_enable,
	.disable = edp_bridge_disable,
	.post_disable = edp_bridge_post_disable,
	.mode_set = edp_bridge_mode_set,
};

/* initialize bridge */
struct drm_bridge *msm_edp_bridge_init(struct msm_edp *edp)
{
	struct drm_bridge *bridge = NULL;
	struct edp_bridge *edp_bridge;
	int ret;

	edp_bridge = devm_kzalloc(edp->dev->dev,
			sizeof(*edp_bridge), GFP_KERNEL);
	if (!edp_bridge) {
		ret = -ENOMEM;
		goto fail;
	}

	edp_bridge->edp = edp;

	bridge = &edp_bridge->base;
	bridge->funcs = &edp_bridge_funcs;

	ret = drm_bridge_attach(edp->dev, bridge);
	if (ret)
		goto fail;

	return bridge;

fail:
	if (bridge)
		edp_bridge_destroy(bridge);

	return ERR_PTR(ret);
}
+161
drivers/gpu/drm/msm/edp/edp_connector.c
··· 1 + /* 2 + * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 and 6 + * only version 2 as published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope that it will be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + */ 13 + 14 + #include "drm/drm_edid.h" 15 + #include "msm_kms.h" 16 + #include "edp.h" 17 + 18 + struct edp_connector { 19 + struct drm_connector base; 20 + struct msm_edp *edp; 21 + }; 22 + #define to_edp_connector(x) container_of(x, struct edp_connector, base) 23 + 24 + static enum drm_connector_status edp_connector_detect( 25 + struct drm_connector *connector, bool force) 26 + { 27 + struct edp_connector *edp_connector = to_edp_connector(connector); 28 + struct msm_edp *edp = edp_connector->edp; 29 + 30 + DBG(""); 31 + return msm_edp_ctrl_panel_connected(edp->ctrl) ? 
32 + connector_status_connected : connector_status_disconnected; 33 + } 34 + 35 + static void edp_connector_destroy(struct drm_connector *connector) 36 + { 37 + struct edp_connector *edp_connector = to_edp_connector(connector); 38 + 39 + DBG(""); 40 + drm_connector_unregister(connector); 41 + drm_connector_cleanup(connector); 42 + 43 + kfree(edp_connector); 44 + } 45 + 46 + static int edp_connector_get_modes(struct drm_connector *connector) 47 + { 48 + struct edp_connector *edp_connector = to_edp_connector(connector); 49 + struct msm_edp *edp = edp_connector->edp; 50 + 51 + struct edid *drm_edid = NULL; 52 + int ret = 0; 53 + 54 + DBG(""); 55 + ret = msm_edp_ctrl_get_panel_info(edp->ctrl, connector, &drm_edid); 56 + if (ret) 57 + return ret; 58 + 59 + drm_mode_connector_update_edid_property(connector, drm_edid); 60 + if (drm_edid) 61 + ret = drm_add_edid_modes(connector, drm_edid); 62 + 63 + return ret; 64 + } 65 + 66 + static int edp_connector_mode_valid(struct drm_connector *connector, 67 + struct drm_display_mode *mode) 68 + { 69 + struct edp_connector *edp_connector = to_edp_connector(connector); 70 + struct msm_edp *edp = edp_connector->edp; 71 + struct msm_drm_private *priv = connector->dev->dev_private; 72 + struct msm_kms *kms = priv->kms; 73 + long actual, requested; 74 + 75 + requested = 1000 * mode->clock; 76 + actual = kms->funcs->round_pixclk(kms, 77 + requested, edp_connector->edp->encoder); 78 + 79 + DBG("requested=%ld, actual=%ld", requested, actual); 80 + if (actual != requested) 81 + return MODE_CLOCK_RANGE; 82 + 83 + if (!msm_edp_ctrl_pixel_clock_valid( 84 + edp->ctrl, mode->clock, NULL, NULL)) 85 + return MODE_CLOCK_RANGE; 86 + 87 + /* Invalidate all modes if color format is not supported */ 88 + if (connector->display_info.bpc > 8) 89 + return MODE_BAD; 90 + 91 + return MODE_OK; 92 + } 93 + 94 + static struct drm_encoder * 95 + edp_connector_best_encoder(struct drm_connector *connector) 96 + { 97 + struct edp_connector *edp_connector = 
to_edp_connector(connector); 98 + 99 + DBG(""); 100 + return edp_connector->edp->encoder; 101 + } 102 + 103 + static const struct drm_connector_funcs edp_connector_funcs = { 104 + .dpms = drm_atomic_helper_connector_dpms, 105 + .detect = edp_connector_detect, 106 + .fill_modes = drm_helper_probe_single_connector_modes, 107 + .destroy = edp_connector_destroy, 108 + .reset = drm_atomic_helper_connector_reset, 109 + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 110 + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 111 + }; 112 + 113 + static const struct drm_connector_helper_funcs edp_connector_helper_funcs = { 114 + .get_modes = edp_connector_get_modes, 115 + .mode_valid = edp_connector_mode_valid, 116 + .best_encoder = edp_connector_best_encoder, 117 + }; 118 + 119 + /* initialize connector */ 120 + struct drm_connector *msm_edp_connector_init(struct msm_edp *edp) 121 + { 122 + struct drm_connector *connector = NULL; 123 + struct edp_connector *edp_connector; 124 + int ret; 125 + 126 + edp_connector = kzalloc(sizeof(*edp_connector), GFP_KERNEL); 127 + if (!edp_connector) { 128 + ret = -ENOMEM; 129 + goto fail; 130 + } 131 + 132 + edp_connector->edp = edp; 133 + 134 + connector = &edp_connector->base; 135 + 136 + ret = drm_connector_init(edp->dev, connector, &edp_connector_funcs, 137 + DRM_MODE_CONNECTOR_eDP); 138 + if (ret) 139 + goto fail; 140 + 141 + drm_connector_helper_add(connector, &edp_connector_helper_funcs); 142 + 143 + /* We don't support HPD, so only poll status until connected. */ 144 + connector->polled = DRM_CONNECTOR_POLL_CONNECT; 145 + 146 + /* Display driver doesn't support interlace now. 
*/ 147 + connector->interlace_allowed = false; 148 + connector->doublescan_allowed = false; 149 + 150 + ret = drm_connector_register(connector); 151 + if (ret) 152 + goto fail; 153 + 154 + return connector; 155 + 156 + fail: 157 + if (connector) 158 + edp_connector_destroy(connector); 159 + 160 + return ERR_PTR(ret); 161 + }
+1390
drivers/gpu/drm/msm/edp/edp_ctrl.c
··· 1 + /* 2 + * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 and 6 + * only version 2 as published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope that it will be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + */ 13 + 14 + #include <linux/clk.h> 15 + #include <linux/gpio/consumer.h> 16 + #include <linux/regulator/consumer.h> 17 + 18 + #include "drm_crtc.h" 19 + #include "drm_dp_helper.h" 20 + #include "drm_edid.h" 21 + #include "edp.h" 22 + #include "edp.xml.h" 23 + 24 + #define VDDA_MIN_UV 1800000 /* uV units */ 25 + #define VDDA_MAX_UV 1800000 /* uV units */ 26 + #define VDDA_UA_ON_LOAD 100000 /* uA units */ 27 + #define VDDA_UA_OFF_LOAD 100 /* uA units */ 28 + 29 + #define DPCD_LINK_VOLTAGE_MAX 4 30 + #define DPCD_LINK_PRE_EMPHASIS_MAX 4 31 + 32 + #define EDP_LINK_BW_MAX DP_LINK_BW_2_7 33 + 34 + /* Link training return value */ 35 + #define EDP_TRAIN_FAIL -1 36 + #define EDP_TRAIN_SUCCESS 0 37 + #define EDP_TRAIN_RECONFIG 1 38 + 39 + #define EDP_CLK_MASK_AHB BIT(0) 40 + #define EDP_CLK_MASK_AUX BIT(1) 41 + #define EDP_CLK_MASK_LINK BIT(2) 42 + #define EDP_CLK_MASK_PIXEL BIT(3) 43 + #define EDP_CLK_MASK_MDP_CORE BIT(4) 44 + #define EDP_CLK_MASK_LINK_CHAN (EDP_CLK_MASK_LINK | EDP_CLK_MASK_PIXEL) 45 + #define EDP_CLK_MASK_AUX_CHAN \ 46 + (EDP_CLK_MASK_AHB | EDP_CLK_MASK_AUX | EDP_CLK_MASK_MDP_CORE) 47 + #define EDP_CLK_MASK_ALL (EDP_CLK_MASK_AUX_CHAN | EDP_CLK_MASK_LINK_CHAN) 48 + 49 + #define EDP_BACKLIGHT_MAX 255 50 + 51 + #define EDP_INTR_STATUS1 \ 52 + (EDP_INTERRUPT_REG_1_HPD | EDP_INTERRUPT_REG_1_AUX_I2C_DONE | \ 53 + EDP_INTERRUPT_REG_1_WRONG_ADDR | EDP_INTERRUPT_REG_1_TIMEOUT | \ 54 + 
EDP_INTERRUPT_REG_1_NACK_DEFER | EDP_INTERRUPT_REG_1_WRONG_DATA_CNT | \ 55 + EDP_INTERRUPT_REG_1_I2C_NACK | EDP_INTERRUPT_REG_1_I2C_DEFER | \ 56 + EDP_INTERRUPT_REG_1_PLL_UNLOCK | EDP_INTERRUPT_REG_1_AUX_ERROR) 57 + #define EDP_INTR_MASK1 (EDP_INTR_STATUS1 << 2) 58 + #define EDP_INTR_STATUS2 \ 59 + (EDP_INTERRUPT_REG_2_READY_FOR_VIDEO | \ 60 + EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT | \ 61 + EDP_INTERRUPT_REG_2_FRAME_END | EDP_INTERRUPT_REG_2_CRC_UPDATED) 62 + #define EDP_INTR_MASK2 (EDP_INTR_STATUS2 << 2) 63 + 64 + struct edp_ctrl { 65 + struct platform_device *pdev; 66 + 67 + void __iomem *base; 68 + 69 + /* regulators */ 70 + struct regulator *vdda_vreg; 71 + struct regulator *lvl_vreg; 72 + 73 + /* clocks */ 74 + struct clk *aux_clk; 75 + struct clk *pixel_clk; 76 + struct clk *ahb_clk; 77 + struct clk *link_clk; 78 + struct clk *mdp_core_clk; 79 + 80 + /* gpios */ 81 + struct gpio_desc *panel_en_gpio; 82 + struct gpio_desc *panel_hpd_gpio; 83 + 84 + /* completion and mutex */ 85 + struct completion idle_comp; 86 + struct mutex dev_mutex; /* To protect device power status */ 87 + 88 + /* work queue */ 89 + struct work_struct on_work; 90 + struct work_struct off_work; 91 + struct workqueue_struct *workqueue; 92 + 93 + /* Interrupt register lock */ 94 + spinlock_t irq_lock; 95 + 96 + bool edp_connected; 97 + bool power_on; 98 + 99 + /* edid raw data */ 100 + struct edid *edid; 101 + 102 + struct drm_dp_link dp_link; 103 + struct drm_dp_aux *drm_aux; 104 + 105 + /* dpcd raw data */ 106 + u8 dpcd[DP_RECEIVER_CAP_SIZE]; 107 + 108 + /* Link status */ 109 + u8 link_rate; 110 + u8 lane_cnt; 111 + u8 v_level; 112 + u8 p_level; 113 + 114 + /* Timing status */ 115 + u8 interlaced; 116 + u32 pixel_rate; /* in kHz */ 117 + u32 color_depth; 118 + 119 + struct edp_aux *aux; 120 + struct edp_phy *phy; 121 + }; 122 + 123 + struct edp_pixel_clk_div { 124 + u32 rate; /* in kHz */ 125 + u32 m; 126 + u32 n; 127 + }; 128 + 129 + #define EDP_PIXEL_CLK_NUM 8 130 + static const struct 
edp_pixel_clk_div clk_divs[2][EDP_PIXEL_CLK_NUM] = { 131 + { /* Link clock = 162MHz, source clock = 810MHz */ 132 + {119000, 31, 211}, /* WSXGA+ 1680x1050@60Hz CVT */ 133 + {130250, 32, 199}, /* UXGA 1600x1200@60Hz CVT */ 134 + {148500, 11, 60}, /* FHD 1920x1080@60Hz */ 135 + {154000, 50, 263}, /* WUXGA 1920x1200@60Hz CVT */ 136 + {209250, 31, 120}, /* QXGA 2048x1536@60Hz CVT */ 137 + {268500, 119, 359}, /* WQXGA 2560x1600@60Hz CVT */ 138 + {138530, 33, 193}, /* AUO B116HAN03.0 Panel */ 139 + {141400, 48, 275}, /* AUO B133HTN01.2 Panel */ 140 + }, 141 + { /* Link clock = 270MHz, source clock = 675MHz */ 142 + {119000, 52, 295}, /* WSXGA+ 1680x1050@60Hz CVT */ 143 + {130250, 11, 57}, /* UXGA 1600x1200@60Hz CVT */ 144 + {148500, 11, 50}, /* FHD 1920x1080@60Hz */ 145 + {154000, 47, 206}, /* WUXGA 1920x1200@60Hz CVT */ 146 + {209250, 31, 100}, /* QXGA 2048x1536@60Hz CVT */ 147 + {268500, 107, 269}, /* WQXGA 2560x1600@60Hz CVT */ 148 + {138530, 63, 307}, /* AUO B116HAN03.0 Panel */ 149 + {141400, 53, 253}, /* AUO B133HTN01.2 Panel */ 150 + }, 151 + }; 152 + 153 + static int edp_clk_init(struct edp_ctrl *ctrl) 154 + { 155 + struct device *dev = &ctrl->pdev->dev; 156 + int ret; 157 + 158 + ctrl->aux_clk = devm_clk_get(dev, "core_clk"); 159 + if (IS_ERR(ctrl->aux_clk)) { 160 + ret = PTR_ERR(ctrl->aux_clk); 161 + pr_err("%s: Can't find aux_clk, %d\n", __func__, ret); 162 + ctrl->aux_clk = NULL; 163 + return ret; 164 + } 165 + 166 + ctrl->pixel_clk = devm_clk_get(dev, "pixel_clk"); 167 + if (IS_ERR(ctrl->pixel_clk)) { 168 + ret = PTR_ERR(ctrl->pixel_clk); 169 + pr_err("%s: Can't find pixel_clk, %d\n", __func__, ret); 170 + ctrl->pixel_clk = NULL; 171 + return ret; 172 + } 173 + 174 + ctrl->ahb_clk = devm_clk_get(dev, "iface_clk"); 175 + if (IS_ERR(ctrl->ahb_clk)) { 176 + ret = PTR_ERR(ctrl->ahb_clk); 177 + pr_err("%s: Can't find ahb_clk, %d\n", __func__, ret); 178 + ctrl->ahb_clk = NULL; 179 + return ret; 180 + } 181 + 182 + ctrl->link_clk = devm_clk_get(dev, "link_clk"); 
183 + if (IS_ERR(ctrl->link_clk)) { 184 + ret = PTR_ERR(ctrl->link_clk); 185 + pr_err("%s: Can't find link_clk, %d\n", __func__, ret); 186 + ctrl->link_clk = NULL; 187 + return ret; 188 + } 189 + 190 + /* need mdp core clock to receive irq */ 191 + ctrl->mdp_core_clk = devm_clk_get(dev, "mdp_core_clk"); 192 + if (IS_ERR(ctrl->mdp_core_clk)) { 193 + ret = PTR_ERR(ctrl->mdp_core_clk); 194 + pr_err("%s: Can't find mdp_core_clk, %d\n", __func__, ret); 195 + ctrl->mdp_core_clk = NULL; 196 + return ret; 197 + } 198 + 199 + return 0; 200 + } 201 + 202 + static int edp_clk_enable(struct edp_ctrl *ctrl, u32 clk_mask) 203 + { 204 + int ret; 205 + 206 + DBG("mask=%x", clk_mask); 207 + /* ahb_clk should be enabled first */ 208 + if (clk_mask & EDP_CLK_MASK_AHB) { 209 + ret = clk_prepare_enable(ctrl->ahb_clk); 210 + if (ret) { 211 + pr_err("%s: Failed to enable ahb clk\n", __func__); 212 + goto f0; 213 + } 214 + } 215 + if (clk_mask & EDP_CLK_MASK_AUX) { 216 + ret = clk_set_rate(ctrl->aux_clk, 19200000); 217 + if (ret) { 218 + pr_err("%s: Failed to set rate aux clk\n", __func__); 219 + goto f1; 220 + } 221 + ret = clk_prepare_enable(ctrl->aux_clk); 222 + if (ret) { 223 + pr_err("%s: Failed to enable aux clk\n", __func__); 224 + goto f1; 225 + } 226 + } 227 + /* Need to set rate and enable link_clk prior to pixel_clk */ 228 + if (clk_mask & EDP_CLK_MASK_LINK) { 229 + DBG("edp->link_clk, set_rate %ld", 230 + (unsigned long)ctrl->link_rate * 27000000); 231 + ret = clk_set_rate(ctrl->link_clk, 232 + (unsigned long)ctrl->link_rate * 27000000); 233 + if (ret) { 234 + pr_err("%s: Failed to set rate to link clk\n", 235 + __func__); 236 + goto f2; 237 + } 238 + 239 + ret = clk_prepare_enable(ctrl->link_clk); 240 + if (ret) { 241 + pr_err("%s: Failed to enable link clk\n", __func__); 242 + goto f2; 243 + } 244 + } 245 + if (clk_mask & EDP_CLK_MASK_PIXEL) { 246 + DBG("edp->pixel_clk, set_rate %ld", 247 + (unsigned long)ctrl->pixel_rate * 1000); 248 + ret = clk_set_rate(ctrl->pixel_clk, 
249 + (unsigned long)ctrl->pixel_rate * 1000); 250 + if (ret) { 251 + pr_err("%s: Failed to set rate to pixel clk\n", 252 + __func__); 253 + goto f3; 254 + } 255 + 256 + ret = clk_prepare_enable(ctrl->pixel_clk); 257 + if (ret) { 258 + pr_err("%s: Failed to enable pixel clk\n", __func__); 259 + goto f3; 260 + } 261 + } 262 + if (clk_mask & EDP_CLK_MASK_MDP_CORE) { 263 + ret = clk_prepare_enable(ctrl->mdp_core_clk); 264 + if (ret) { 265 + pr_err("%s: Failed to enable mdp core clk\n", __func__); 266 + goto f4; 267 + } 268 + } 269 + 270 + return 0; 271 + 272 + f4: 273 + if (clk_mask & EDP_CLK_MASK_PIXEL) 274 + clk_disable_unprepare(ctrl->pixel_clk); 275 + f3: 276 + if (clk_mask & EDP_CLK_MASK_LINK) 277 + clk_disable_unprepare(ctrl->link_clk); 278 + f2: 279 + if (clk_mask & EDP_CLK_MASK_AUX) 280 + clk_disable_unprepare(ctrl->aux_clk); 281 + f1: 282 + if (clk_mask & EDP_CLK_MASK_AHB) 283 + clk_disable_unprepare(ctrl->ahb_clk); 284 + f0: 285 + return ret; 286 + } 287 + 288 + static void edp_clk_disable(struct edp_ctrl *ctrl, u32 clk_mask) 289 + { 290 + if (clk_mask & EDP_CLK_MASK_MDP_CORE) 291 + clk_disable_unprepare(ctrl->mdp_core_clk); 292 + if (clk_mask & EDP_CLK_MASK_PIXEL) 293 + clk_disable_unprepare(ctrl->pixel_clk); 294 + if (clk_mask & EDP_CLK_MASK_LINK) 295 + clk_disable_unprepare(ctrl->link_clk); 296 + if (clk_mask & EDP_CLK_MASK_AUX) 297 + clk_disable_unprepare(ctrl->aux_clk); 298 + if (clk_mask & EDP_CLK_MASK_AHB) 299 + clk_disable_unprepare(ctrl->ahb_clk); 300 + } 301 + 302 + static int edp_regulator_init(struct edp_ctrl *ctrl) 303 + { 304 + struct device *dev = &ctrl->pdev->dev; 305 + 306 + DBG(""); 307 + ctrl->vdda_vreg = devm_regulator_get(dev, "vdda"); 308 + if (IS_ERR(ctrl->vdda_vreg)) { 309 + pr_err("%s: Could not get vdda reg, ret = %ld\n", __func__, 310 + PTR_ERR(ctrl->vdda_vreg)); 311 + ctrl->vdda_vreg = NULL; 312 + return PTR_ERR(ctrl->vdda_vreg); 313 + } 314 + ctrl->lvl_vreg = devm_regulator_get(dev, "lvl-vdd"); 315 + if (IS_ERR(ctrl->lvl_vreg)) { 
316 + pr_err("Could not get lvl-vdd reg, %ld", 317 + PTR_ERR(ctrl->lvl_vreg)); 318 + ctrl->lvl_vreg = NULL; 319 + return PTR_ERR(ctrl->lvl_vreg); 320 + } 321 + 322 + return 0; 323 + } 324 + 325 + static int edp_regulator_enable(struct edp_ctrl *ctrl) 326 + { 327 + int ret; 328 + 329 + ret = regulator_set_voltage(ctrl->vdda_vreg, VDDA_MIN_UV, VDDA_MAX_UV); 330 + if (ret) { 331 + pr_err("%s:vdda_vreg set_voltage failed, %d\n", __func__, ret); 332 + goto vdda_set_fail; 333 + } 334 + 335 + ret = regulator_set_optimum_mode(ctrl->vdda_vreg, VDDA_UA_ON_LOAD); 336 + if (ret < 0) { 337 + pr_err("%s: vdda_vreg set regulator mode failed.\n", __func__); 338 + goto vdda_set_fail; 339 + } 340 + 341 + ret = regulator_enable(ctrl->vdda_vreg); 342 + if (ret) { 343 + pr_err("%s: Failed to enable vdda_vreg regulator.\n", __func__); 344 + goto vdda_enable_fail; 345 + } 346 + 347 + ret = regulator_enable(ctrl->lvl_vreg); 348 + if (ret) { 349 + pr_err("Failed to enable lvl-vdd reg regulator, %d", ret); 350 + goto lvl_enable_fail; 351 + } 352 + 353 + DBG("exit"); 354 + return 0; 355 + 356 + lvl_enable_fail: 357 + regulator_disable(ctrl->vdda_vreg); 358 + vdda_enable_fail: 359 + regulator_set_optimum_mode(ctrl->vdda_vreg, VDDA_UA_OFF_LOAD); 360 + vdda_set_fail: 361 + return ret; 362 + } 363 + 364 + static void edp_regulator_disable(struct edp_ctrl *ctrl) 365 + { 366 + regulator_disable(ctrl->lvl_vreg); 367 + regulator_disable(ctrl->vdda_vreg); 368 + regulator_set_optimum_mode(ctrl->vdda_vreg, VDDA_UA_OFF_LOAD); 369 + } 370 + 371 + static int edp_gpio_config(struct edp_ctrl *ctrl) 372 + { 373 + struct device *dev = &ctrl->pdev->dev; 374 + int ret; 375 + 376 + ctrl->panel_hpd_gpio = devm_gpiod_get(dev, "panel-hpd"); 377 + if (IS_ERR(ctrl->panel_hpd_gpio)) { 378 + ret = PTR_ERR(ctrl->panel_hpd_gpio); 379 + ctrl->panel_hpd_gpio = NULL; 380 + pr_err("%s: cannot get panel-hpd-gpios, %d\n", __func__, ret); 381 + return ret; 382 + } 383 + 384 + ret = gpiod_direction_input(ctrl->panel_hpd_gpio); 
385 + if (ret) { 386 + pr_err("%s: Set direction for hpd failed, %d\n", __func__, ret); 387 + return ret; 388 + } 389 + 390 + ctrl->panel_en_gpio = devm_gpiod_get(dev, "panel-en"); 391 + if (IS_ERR(ctrl->panel_en_gpio)) { 392 + ret = PTR_ERR(ctrl->panel_en_gpio); 393 + ctrl->panel_en_gpio = NULL; 394 + pr_err("%s: cannot get panel-en-gpios, %d\n", __func__, ret); 395 + return ret; 396 + } 397 + 398 + ret = gpiod_direction_output(ctrl->panel_en_gpio, 0); 399 + if (ret) { 400 + pr_err("%s: Set direction for panel_en failed, %d\n", 401 + __func__, ret); 402 + return ret; 403 + } 404 + 405 + DBG("gpio on"); 406 + 407 + return 0; 408 + } 409 + 410 + static void edp_ctrl_irq_enable(struct edp_ctrl *ctrl, int enable) 411 + { 412 + unsigned long flags; 413 + 414 + DBG("%d", enable); 415 + spin_lock_irqsave(&ctrl->irq_lock, flags); 416 + if (enable) { 417 + edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_1, EDP_INTR_MASK1); 418 + edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_2, EDP_INTR_MASK2); 419 + } else { 420 + edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_1, 0x0); 421 + edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_2, 0x0); 422 + } 423 + spin_unlock_irqrestore(&ctrl->irq_lock, flags); 424 + DBG("exit"); 425 + } 426 + 427 + static void edp_fill_link_cfg(struct edp_ctrl *ctrl) 428 + { 429 + u32 prate; 430 + u32 lrate; 431 + u32 bpp; 432 + u8 max_lane = ctrl->dp_link.num_lanes; 433 + u8 lane; 434 + 435 + prate = ctrl->pixel_rate; 436 + bpp = ctrl->color_depth * 3; 437 + 438 + /* 439 + * By default, use the maximum link rate and minimum lane count, 440 + * so that we can do rate down shift during link training. 
	 */
	ctrl->link_rate = drm_dp_link_rate_to_bw_code(ctrl->dp_link.rate);

	prate *= bpp;
	prate /= 8; /* in kByte */

	lrate = 270000; /* in kHz */
	lrate *= ctrl->link_rate;
	lrate /= 10; /* in kByte, 10 bits --> 8 bits */

	/* Double the lane count until link bandwidth covers the pixel data */
	for (lane = 1; lane <= max_lane; lane <<= 1) {
		if (lrate >= prate)
			break;
		lrate <<= 1;
	}

	ctrl->lane_cnt = lane;
	DBG("rate=%d lane=%d", ctrl->link_rate, ctrl->lane_cnt);
}

/*
 * Set the sink power state through the DP_SET_POWER DPCD register.
 * Sinks with DPCD revision < 1.1 do not implement it; treated as success.
 */
static int edp_sink_power_state(struct edp_ctrl *ctrl, u8 state)
{
	u8 s = state;

	DBG("%d", s);

	if (ctrl->dp_link.revision < 0x11)
		return 0;

	if (drm_dp_dpcd_write(ctrl->drm_aux, DP_SET_POWER, &s, 1) < 1) {
		pr_err("%s: Set power state to panel failed\n", __func__);
		return -ENOLINK;
	}

	return 0;
}

/* Program lane count, framing, color depth and scan mode into the HW */
static void edp_config_ctrl(struct edp_ctrl *ctrl)
{
	u32 data;
	enum edp_color_depth depth;

	data = EDP_CONFIGURATION_CTRL_LANES(ctrl->lane_cnt - 1);

	if (ctrl->dp_link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
		data |= EDP_CONFIGURATION_CTRL_ENHANCED_FRAMING;

	/* only 6bpc and 8bpc are programmed here; default is 6 */
	depth = EDP_6BIT;
	if (ctrl->color_depth == 8)
		depth = EDP_8BIT;

	data |= EDP_CONFIGURATION_CTRL_COLOR(depth);

	if (!ctrl->interlaced)	/* progressive */
		data |= EDP_CONFIGURATION_CTRL_PROGRESSIVE;

	data |= (EDP_CONFIGURATION_CTRL_SYNC_CLK |
		EDP_CONFIGURATION_CTRL_STATIC_MVID);

	edp_write(ctrl->base + REG_EDP_CONFIGURATION_CTRL, data);
}

/* Write the mainlink state machine control register */
static void edp_state_ctrl(struct edp_ctrl *ctrl, u32 state)
{
	edp_write(ctrl->base + REG_EDP_STATE_CTRL, state);
	/* Make sure H/W status is set */
	wmb();
}

/*
 * Push the same voltage-swing / pre-emphasis setting into all four
 * DP_TRAINING_LANEx_SET registers (DPCD 0x103..0x106) on the sink.
 */
static int edp_lane_set_write(struct edp_ctrl *ctrl,
	u8 voltage_level, u8 pre_emphasis_level)
{
	int i;
	u8 buf[4];

	/* bit 2 flags "max reached" — presumably MAX_SWING_REACHED /
	 * MAX_PRE_EMPHASIS_REACHED per the DP spec; confirm against spec */
	if (voltage_level >= DPCD_LINK_VOLTAGE_MAX)
		voltage_level |= 0x04;

	if (pre_emphasis_level >= DPCD_LINK_PRE_EMPHASIS_MAX)
		pre_emphasis_level |= 0x04;

	pre_emphasis_level <<= 3;

	for (i = 0; i < 4; i++)
		buf[i] = voltage_level | pre_emphasis_level;

	DBG("%s: p|v=0x%x", __func__, voltage_level | pre_emphasis_level);
	/* 0x103 == DP_TRAINING_LANE0_SET */
	if (drm_dp_dpcd_write(ctrl->drm_aux, 0x103, buf, 4) < 4) {
		pr_err("%s: Set sw/pe to panel failed\n", __func__);
		return -ENOLINK;
	}

	return 0;
}

/* Tell the sink which training pattern (or none) to expect */
static int edp_train_pattern_set_write(struct edp_ctrl *ctrl, u8 pattern)
{
	u8 p = pattern;

	DBG("pattern=%x", p);
	if (drm_dp_dpcd_write(ctrl->drm_aux,
				DP_TRAINING_PATTERN_SET, &p, 1) < 1) {
		pr_err("%s: Set training pattern to panel failed\n", __func__);
		return -ENOLINK;
	}

	return 0;
}

/*
 * Parse the sink's adjustment request out of @link_status and record the
 * maximum requested swing/pre-emphasis level across all active lanes.
 */
static void edp_sink_train_set_adjust(struct edp_ctrl *ctrl,
	const u8 *link_status)
{
	int i;
	u8 max = 0;
	u8 data;

	/* use the max level across lanes */
	for (i = 0; i < ctrl->lane_cnt; i++) {
		data = drm_dp_get_adjust_request_voltage(link_status, i);
		DBG("lane=%d req_voltage_swing=0x%x", i, data);
		if (max < data)
			max = data;
	}

	ctrl->v_level = max >> DP_TRAIN_VOLTAGE_SWING_SHIFT;

	/* use the max level across lanes */
	max = 0;
	for (i = 0; i < ctrl->lane_cnt; i++) {
		data = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
		DBG("lane=%d req_pre_emphasis=0x%x", i, data);
		if (max < data)
			max = data;
	}

	ctrl->p_level = max >> DP_TRAIN_PRE_EMPHASIS_SHIFT;
	DBG("v_level=%d, p_level=%d", ctrl->v_level, ctrl->p_level);
}

/*
 * Ask the controller to emit training pattern @train (1-based) and poll
 * MAINLINK_READY for the matching pattern-ready bit (bounded busy-wait).
 */
static void edp_host_train_set(struct edp_ctrl *ctrl, u32 train)
{
	int cnt = 10;
	u32 data;
	u32 shift = train - 1;

	DBG("train=%d", train);

	edp_state_ctrl(ctrl, EDP_STATE_CTRL_TRAIN_PATTERN_1 << shift);
	while (--cnt) {
		data = edp_read(ctrl->base + REG_EDP_MAINLINK_READY);
		if (data & (EDP_MAINLINK_READY_TRAIN_PATTERN_1_READY << shift))
			break;
	}

	if (cnt == 0)
		pr_err("%s: set link_train=%d failed\n", __func__, train);
}

/* PHY settings indexed [swing][pre-emphasis]; 0xFF marks a disallowed combo */
static const u8 vm_pre_emphasis[4][4] = {
	{0x03, 0x06, 0x09, 0x0C},	/* pe0, 0 db */
	{0x03, 0x06, 0x09, 0xFF},	/* pe1, 3.5 db */
	{0x03, 0x06, 0xFF, 0xFF},	/* pe2, 6.0 db */
	{0x03, 0xFF, 0xFF, 0xFF}	/* pe3, 9.5 db */
};

/* voltage swing, 0.2v and 1.0v are not support */
static const u8 vm_voltage_swing[4][4] = {
	{0x14, 0x18, 0x1A, 0x1E}, /* sw0, 0.4v  */
	{0x18, 0x1A, 0x1E, 0xFF}, /* sw1, 0.6 v */
	{0x1A, 0x1E, 0xFF, 0xFF}, /* sw1, 0.8 v */
	{0x1E, 0xFF, 0xFF, 0xFF}  /* sw1, 1.2 v, optional */
};

/*
 * Apply the current v_level/p_level pair to the local PHY and the sink.
 * Returns -EINVAL if the table marks the combination invalid (0xFF).
 */
static int edp_voltage_pre_emphasise_set(struct edp_ctrl *ctrl)
{
	u32 value0;
	u32 value1;

	DBG("v=%d p=%d", ctrl->v_level, ctrl->p_level);

	value0 = vm_pre_emphasis[(int)(ctrl->v_level)][(int)(ctrl->p_level)];
	value1 = vm_voltage_swing[(int)(ctrl->v_level)][(int)(ctrl->p_level)];

	/* Configure host and panel only if both values are allowed */
	if (value0 != 0xFF && value1 != 0xFF) {
		msm_edp_phy_vm_pe_cfg(ctrl->phy, value0, value1);
		return edp_lane_set_write(ctrl, ctrl->v_level, ctrl->p_level);
	}

	return -EINVAL;
}

/*
 * Clock-recovery phase of link training (pattern 1).  Returns 0 on lock,
 * -1 when swing is exhausted or 5 tries at one level fail (caller may
 * down-shift the link rate), or -ENOLINK on AUX errors.
 */
static int edp_start_link_train_1(struct edp_ctrl *ctrl)
{
	u8 link_status[DP_LINK_STATUS_SIZE];
	u8 old_v_level;
	int tries;
	int ret;
	int rlen;

	DBG("");

	edp_host_train_set(ctrl, DP_TRAINING_PATTERN_1);
	ret = edp_voltage_pre_emphasise_set(ctrl);
	if (ret)
		return ret;
	ret = edp_train_pattern_set_write(ctrl,
			DP_TRAINING_PATTERN_1 | DP_RECOVERED_CLOCK_OUT_EN);
	if (ret)
		return ret;

	tries = 0;
	old_v_level = ctrl->v_level;
	while (1) {
		drm_dp_link_train_clock_recovery_delay(ctrl->dpcd);

		rlen = drm_dp_dpcd_read_link_status(ctrl->drm_aux, link_status);
		if (rlen < DP_LINK_STATUS_SIZE) {
			pr_err("%s: read link status failed\n", __func__);
			return -ENOLINK;
		}
		if (drm_dp_clock_recovery_ok(link_status, ctrl->lane_cnt)) {
			ret = 0;
			break;
		}

		if (ctrl->v_level == DPCD_LINK_VOLTAGE_MAX) {
			ret = -1;
			break;
		}

		/* give up after 5 attempts at the same voltage level */
		if (old_v_level == ctrl->v_level) {
			tries++;
			if (tries >= 5) {
				ret = -1;
				break;
			}
		} else {
			tries = 0;
			old_v_level = ctrl->v_level;
		}

		edp_sink_train_set_adjust(ctrl, link_status);
		ret = edp_voltage_pre_emphasise_set(ctrl);
		if (ret)
			return ret;
	}

	return ret;
}

/*
 * Channel-equalization phase of link training (pattern 2).  Returns 0 on
 * success, -1 after 10 failed adjustments, or -ENOLINK on AUX errors.
 */
static int edp_start_link_train_2(struct edp_ctrl *ctrl)
{
	u8 link_status[DP_LINK_STATUS_SIZE];
	int tries = 0;
	int ret;
	int rlen;

	DBG("");

	edp_host_train_set(ctrl, DP_TRAINING_PATTERN_2);
	ret = edp_voltage_pre_emphasise_set(ctrl);
	if (ret)
		return ret;

	ret = edp_train_pattern_set_write(ctrl,
			DP_TRAINING_PATTERN_2 | DP_RECOVERED_CLOCK_OUT_EN);
	if (ret)
		return ret;

	while (1) {
		drm_dp_link_train_channel_eq_delay(ctrl->dpcd);

		rlen = drm_dp_dpcd_read_link_status(ctrl->drm_aux, link_status);
		if (rlen < DP_LINK_STATUS_SIZE) {
			pr_err("%s: read link status failed\n", __func__);
			return -ENOLINK;
		}
		if (drm_dp_channel_eq_ok(link_status, ctrl->lane_cnt)) {
			ret = 0;
			break;
		}

		tries++;
		if (tries > 10) {
			ret = -1;
			break;
		}

		edp_sink_train_set_adjust(ctrl, link_status);
		ret = edp_voltage_pre_emphasise_set(ctrl);
		if (ret)
			return ret;
	}

	return ret;
}

/*
 * Try the next lower link rate (2.7G -> 1.62G: bw codes differ by 4),
 * doubling the lane count to keep bandwidth when possible.  Returns 0 if
 * the new rate/lane pair still carries the pixel stream, -EINVAL otherwise.
 */
static int edp_link_rate_down_shift(struct edp_ctrl *ctrl)
{
	u32 prate, lrate, bpp;
	u8 rate, lane, max_lane;
	int changed = 0;

	rate = ctrl->link_rate;
	lane = ctrl->lane_cnt;
	max_lane = ctrl->dp_link.num_lanes;

	bpp = ctrl->color_depth * 3;
	prate = ctrl->pixel_rate;
	prate *= bpp;
	prate /= 8; /* in kByte */

	if (rate > DP_LINK_BW_1_62 && rate <= EDP_LINK_BW_MAX) {
		rate -= 4;		/* reduce rate */
		changed++;
	}

	if (changed) {
		if (lane >= 1 && lane < max_lane)
			lane <<= 1;	/* increase lane */

		lrate = 270000; /* in kHz */
		lrate *= rate;
		lrate /= 10; /* kByte, 10 bits --> 8 bits */
		lrate *= lane;

		DBG("new lrate=%u prate=%u(kHz) rate=%d lane=%d p=%u b=%d",
			lrate, prate, rate, lane,
			ctrl->pixel_rate,
			bpp);

		if (lrate > prate) {
			ctrl->link_rate = rate;
			ctrl->lane_cnt = lane;
			DBG("new rate=%d %d", rate, lane);
			return 0;
		}
	}

	return -EINVAL;
}

/* Tell the sink training is over, then honour the required settle delay */
static int edp_clear_training_pattern(struct edp_ctrl *ctrl)
{
	int ret;

	ret = edp_train_pattern_set_write(ctrl, 0);

	drm_dp_link_train_channel_eq_delay(ctrl->dpcd);

	return ret;
}

/*
 * Run the full two-phase link training sequence.  Returns
 * EDP_TRAIN_SUCCESS, EDP_TRAIN_RECONFIG (caller must reinit the link at
 * the down-shifted rate and retry), or EDP_TRAIN_FAIL.
 */
static int edp_do_link_train(struct edp_ctrl *ctrl)
{
	int ret;
	struct drm_dp_link dp_link;

	DBG("");
	/*
	 * Set the current link rate and lane cnt to panel.
	 * They may have been adjusted and the values are different
	 * from them in DPCD CAP.
	 */
	dp_link.num_lanes = ctrl->lane_cnt;
	dp_link.rate = drm_dp_bw_code_to_link_rate(ctrl->link_rate);
	dp_link.capabilities = ctrl->dp_link.capabilities;
	if (drm_dp_link_configure(ctrl->drm_aux, &dp_link) < 0)
		return EDP_TRAIN_FAIL;

	ctrl->v_level = 0; /* start from default level */
	ctrl->p_level = 0;

	edp_state_ctrl(ctrl, 0);
	if (edp_clear_training_pattern(ctrl))
		return EDP_TRAIN_FAIL;

	ret = edp_start_link_train_1(ctrl);
	if (ret < 0) {
		/* a negative result may be recoverable at a lower rate */
		if (edp_link_rate_down_shift(ctrl) == 0) {
			DBG("link reconfig");
			ret = EDP_TRAIN_RECONFIG;
			goto clear;
		} else {
			pr_err("%s: Training 1 failed", __func__);
			ret = EDP_TRAIN_FAIL;
			goto clear;
		}
	}
	DBG("Training 1 completed successfully");

	edp_state_ctrl(ctrl, 0);
	if (edp_clear_training_pattern(ctrl))
		return EDP_TRAIN_FAIL;

	ret = edp_start_link_train_2(ctrl);
	if (ret < 0) {
		if (edp_link_rate_down_shift(ctrl) == 0) {
			DBG("link reconfig");
			ret = EDP_TRAIN_RECONFIG;
			goto clear;
		} else {
			pr_err("%s: Training 2 failed", __func__);
			ret = EDP_TRAIN_FAIL;
			goto clear;
		}
	}
	DBG("Training 2 completed successfully");

	edp_state_ctrl(ctrl, EDP_STATE_CTRL_SEND_VIDEO);
clear:
	edp_clear_training_pattern(ctrl);

	return ret;
}

/* Set the sync-clock bit and the MISC0 color depth field */
static void edp_clock_synchrous(struct edp_ctrl *ctrl, int sync)
{
	u32 data;
	enum edp_color_depth depth;

	data = edp_read(ctrl->base + REG_EDP_MISC1_MISC0);

	if (sync)
		data |= EDP_MISC1_MISC0_SYNC;
	else
		data &= ~EDP_MISC1_MISC0_SYNC;

	/* only legacy rgb mode supported */
	depth = EDP_6BIT; /* Default */
	if (ctrl->color_depth == 8)
		depth = EDP_8BIT;
	else if (ctrl->color_depth == 10)
		depth = EDP_10BIT;
	else if (ctrl->color_depth == 12)
		depth = EDP_12BIT;
	else if (ctrl->color_depth == 16)
		depth = EDP_16BIT;

	data |= EDP_MISC1_MISC0_COLOR(depth);

	edp_write(ctrl->base + REG_EDP_MISC1_MISC0, data);
}

/*
 * Program the software M/N timing values, scaled per link rate.
 * Fails only for an unexpected bw code.
 */
static int edp_sw_mvid_nvid(struct edp_ctrl *ctrl, u32 m, u32 n)
{
	u32 n_multi, m_multi = 5;

	if (ctrl->link_rate == DP_LINK_BW_1_62) {
		n_multi = 1;
	} else if (ctrl->link_rate == DP_LINK_BW_2_7) {
		n_multi = 2;
	} else {
		pr_err("%s: Invalid link rate, %d\n", __func__,
				ctrl->link_rate);
		return -EINVAL;
	}

	edp_write(ctrl->base + REG_EDP_SOFTWARE_MVID, m * m_multi);
	edp_write(ctrl->base + REG_EDP_SOFTWARE_NVID, n * n_multi);

	return 0;
}

/* Reset the mainlink, then leave it enabled or disabled as requested */
static void edp_mainlink_ctrl(struct edp_ctrl *ctrl, int enable)
{
	u32 data = 0;

	edp_write(ctrl->base + REG_EDP_MAINLINK_CTRL, EDP_MAINLINK_CTRL_RESET);
	/* Make sure fully reset */
	wmb();
	usleep_range(500, 1000);

	if (enable)
		data |= EDP_MAINLINK_CTRL_ENABLE;

	edp_write(ctrl->base + REG_EDP_MAINLINK_CTRL, data);
}

/*
 * Power the AUX-channel path (regulators, AUX clocks, PHY, AUX block,
 * panel enable GPIO) up or down; teardown runs in exact reverse order.
 */
static void edp_ctrl_phy_aux_enable(struct edp_ctrl *ctrl, int enable)
{
	if (enable) {
		edp_regulator_enable(ctrl);
		edp_clk_enable(ctrl, EDP_CLK_MASK_AUX_CHAN);
		msm_edp_phy_ctrl(ctrl->phy, 1);
		msm_edp_aux_ctrl(ctrl->aux, 1);
		gpiod_set_value(ctrl->panel_en_gpio, 1);
	} else {
		gpiod_set_value(ctrl->panel_en_gpio, 0);
		msm_edp_aux_ctrl(ctrl->aux, 0);
		msm_edp_phy_ctrl(ctrl->phy, 0);
		edp_clk_disable(ctrl, EDP_CLK_MASK_AUX_CHAN);
		edp_regulator_disable(ctrl);
	}
}

/* Bring the mainlink data path (clocks, PHY lanes, M/N, mainlink) up/down */
static void edp_ctrl_link_enable(struct edp_ctrl *ctrl, int enable)
{
	u32 m, n;

	if (enable) {
		/* Enable link channel clocks */
		edp_clk_enable(ctrl, EDP_CLK_MASK_LINK_CHAN);

		msm_edp_phy_lane_power_ctrl(ctrl->phy, true, ctrl->lane_cnt);

		msm_edp_phy_vm_pe_init(ctrl->phy);

		/* Make sure phy is programed */
		wmb();
		msm_edp_phy_ready(ctrl->phy);

		edp_config_ctrl(ctrl);
		/* NOTE(review): the return values of the two calls below are
		 * ignored; m/n are only valid if the lookup succeeded */
		msm_edp_ctrl_pixel_clock_valid(ctrl, ctrl->pixel_rate, &m, &n);
		edp_sw_mvid_nvid(ctrl, m, n);
		edp_mainlink_ctrl(ctrl, 1);
	} else {
		edp_mainlink_ctrl(ctrl, 0);

		msm_edp_phy_lane_power_ctrl(ctrl->phy, false, 0);
		edp_clk_disable(ctrl, EDP_CLK_MASK_LINK_CHAN);
	}
}

/*
 * Train the link, re-initializing the mainlink and retrying whenever the
 * training code asks for a rate/lane reconfiguration.
 */
static int edp_ctrl_training(struct edp_ctrl *ctrl)
{
	int ret;

	/* Do link training only when power is on */
	if (!ctrl->power_on)
		return -EINVAL;

train_start:
	ret = edp_do_link_train(ctrl);
	if (ret == EDP_TRAIN_RECONFIG) {
		/* Re-configure main link */
		edp_ctrl_irq_enable(ctrl, 0);
		edp_ctrl_link_enable(ctrl, 0);
		msm_edp_phy_ctrl(ctrl->phy, 0);

		/* Make sure link is fully disabled */
		wmb();
		usleep_range(500, 1000);

		msm_edp_phy_ctrl(ctrl->phy, 1);
		edp_ctrl_link_enable(ctrl, 1);
		edp_ctrl_irq_enable(ctrl, 1);
		goto train_start;
	}

	return ret;
}

/* Deferred power-on path; serialized against power-off via dev_mutex */
static void edp_ctrl_on_worker(struct work_struct *work)
{
	struct edp_ctrl *ctrl = container_of(
				work, struct edp_ctrl, on_work);
	int ret;

	mutex_lock(&ctrl->dev_mutex);

	if (ctrl->power_on) {
		DBG("already on");
		goto unlock_ret;
	}

	edp_ctrl_phy_aux_enable(ctrl, 1);
	edp_ctrl_link_enable(ctrl, 1);

	edp_ctrl_irq_enable(ctrl, 1);
	ret = edp_sink_power_state(ctrl, DP_SET_POWER_D0);
	if (ret)
		goto fail;

	ctrl->power_on = true;

	/* Start link training */
	ret = edp_ctrl_training(ctrl);
	if (ret != EDP_TRAIN_SUCCESS)
		goto fail;

	DBG("DONE");
	goto unlock_ret;

fail:
	/* unwind in reverse order of the bring-up above */
	edp_ctrl_irq_enable(ctrl, 0);
	edp_ctrl_link_enable(ctrl, 0);
	edp_ctrl_phy_aux_enable(ctrl, 0);
	ctrl->power_on = false;
unlock_ret:
	mutex_unlock(&ctrl->dev_mutex);
}

/*
 * Deferred power-off path: push the idle pattern first (waiting up to
 * 500ms for the IDLE_PATTERNs_SENT irq), then tear everything down.
 */
static void edp_ctrl_off_worker(struct work_struct *work)
{
	struct edp_ctrl *ctrl = container_of(
				work, struct edp_ctrl, off_work);
	int ret;

	mutex_lock(&ctrl->dev_mutex);

	if (!ctrl->power_on) {
		DBG("already off");
		goto unlock_ret;
	}

	reinit_completion(&ctrl->idle_comp);
	edp_state_ctrl(ctrl, EDP_STATE_CTRL_PUSH_IDLE);

	ret = wait_for_completion_timeout(&ctrl->idle_comp,
						msecs_to_jiffies(500));
	if (ret <= 0)
		DBG("%s: idle pattern timedout, %d\n",
				__func__, ret);

	edp_state_ctrl(ctrl, 0);

	edp_sink_power_state(ctrl, DP_SET_POWER_D3);

	edp_ctrl_irq_enable(ctrl, 0);

	edp_ctrl_link_enable(ctrl, 0);

	edp_ctrl_phy_aux_enable(ctrl, 0);

	ctrl->power_on = false;

unlock_ret:
	mutex_unlock(&ctrl->dev_mutex);
}

/*
 * Top-half interrupt handler: ack handled status bits (ack bit is one
 * position above status, mask bits must be kept set) and dispatch.
 */
irqreturn_t msm_edp_ctrl_irq(struct edp_ctrl *ctrl)
{
	u32 isr1, isr2, mask1, mask2;
	u32 ack;

	DBG("");
	spin_lock(&ctrl->irq_lock);
	isr1 = edp_read(ctrl->base + REG_EDP_INTERRUPT_REG_1);
	isr2 = edp_read(ctrl->base + REG_EDP_INTERRUPT_REG_2);

	mask1 = isr1 & EDP_INTR_MASK1;
	mask2 = isr2 & EDP_INTR_MASK2;

	isr1 &= ~mask1;	/* remove masks bit */
	isr2 &= ~mask2;

	DBG("isr=%x mask=%x isr2=%x mask2=%x",
			isr1, mask1, isr2, mask2);

	ack = isr1 & EDP_INTR_STATUS1;
	ack <<= 1;	/* ack bits */
	ack |= mask1;
	edp_write(ctrl->base +
REG_EDP_INTERRUPT_REG_1, ack); 1095 + 1096 + ack = isr2 & EDP_INTR_STATUS2; 1097 + ack <<= 1; /* ack bits */ 1098 + ack |= mask2; 1099 + edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_2, ack); 1100 + spin_unlock(&ctrl->irq_lock); 1101 + 1102 + if (isr1 & EDP_INTERRUPT_REG_1_HPD) 1103 + DBG("edp_hpd"); 1104 + 1105 + if (isr2 & EDP_INTERRUPT_REG_2_READY_FOR_VIDEO) 1106 + DBG("edp_video_ready"); 1107 + 1108 + if (isr2 & EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT) { 1109 + DBG("idle_patterns_sent"); 1110 + complete(&ctrl->idle_comp); 1111 + } 1112 + 1113 + msm_edp_aux_irq(ctrl->aux, isr1); 1114 + 1115 + return IRQ_HANDLED; 1116 + } 1117 + 1118 + void msm_edp_ctrl_power(struct edp_ctrl *ctrl, bool on) 1119 + { 1120 + if (on) 1121 + queue_work(ctrl->workqueue, &ctrl->on_work); 1122 + else 1123 + queue_work(ctrl->workqueue, &ctrl->off_work); 1124 + } 1125 + 1126 + int msm_edp_ctrl_init(struct msm_edp *edp) 1127 + { 1128 + struct edp_ctrl *ctrl = NULL; 1129 + struct device *dev = &edp->pdev->dev; 1130 + int ret; 1131 + 1132 + if (!edp) { 1133 + pr_err("%s: edp is NULL!\n", __func__); 1134 + return -EINVAL; 1135 + } 1136 + 1137 + ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL); 1138 + if (!ctrl) 1139 + return -ENOMEM; 1140 + 1141 + edp->ctrl = ctrl; 1142 + ctrl->pdev = edp->pdev; 1143 + 1144 + ctrl->base = msm_ioremap(ctrl->pdev, "edp", "eDP"); 1145 + if (IS_ERR(ctrl->base)) 1146 + return PTR_ERR(ctrl->base); 1147 + 1148 + /* Get regulator, clock, gpio, pwm */ 1149 + ret = edp_regulator_init(ctrl); 1150 + if (ret) { 1151 + pr_err("%s:regulator init fail\n", __func__); 1152 + return ret; 1153 + } 1154 + ret = edp_clk_init(ctrl); 1155 + if (ret) { 1156 + pr_err("%s:clk init fail\n", __func__); 1157 + return ret; 1158 + } 1159 + ret = edp_gpio_config(ctrl); 1160 + if (ret) { 1161 + pr_err("%s:failed to configure GPIOs: %d", __func__, ret); 1162 + return ret; 1163 + } 1164 + 1165 + /* Init aux and phy */ 1166 + ctrl->aux = msm_edp_aux_init(dev, ctrl->base, &ctrl->drm_aux); 1167 + 
if (!ctrl->aux || !ctrl->drm_aux) { 1168 + pr_err("%s:failed to init aux\n", __func__); 1169 + return ret; 1170 + } 1171 + 1172 + ctrl->phy = msm_edp_phy_init(dev, ctrl->base); 1173 + if (!ctrl->phy) { 1174 + pr_err("%s:failed to init phy\n", __func__); 1175 + goto err_destory_aux; 1176 + } 1177 + 1178 + spin_lock_init(&ctrl->irq_lock); 1179 + mutex_init(&ctrl->dev_mutex); 1180 + init_completion(&ctrl->idle_comp); 1181 + 1182 + /* setup workqueue */ 1183 + ctrl->workqueue = alloc_ordered_workqueue("edp_drm_work", 0); 1184 + INIT_WORK(&ctrl->on_work, edp_ctrl_on_worker); 1185 + INIT_WORK(&ctrl->off_work, edp_ctrl_off_worker); 1186 + 1187 + return 0; 1188 + 1189 + err_destory_aux: 1190 + msm_edp_aux_destroy(dev, ctrl->aux); 1191 + ctrl->aux = NULL; 1192 + return ret; 1193 + } 1194 + 1195 + void msm_edp_ctrl_destroy(struct edp_ctrl *ctrl) 1196 + { 1197 + if (!ctrl) 1198 + return; 1199 + 1200 + if (ctrl->workqueue) { 1201 + flush_workqueue(ctrl->workqueue); 1202 + destroy_workqueue(ctrl->workqueue); 1203 + ctrl->workqueue = NULL; 1204 + } 1205 + 1206 + if (ctrl->aux) { 1207 + msm_edp_aux_destroy(&ctrl->pdev->dev, ctrl->aux); 1208 + ctrl->aux = NULL; 1209 + } 1210 + 1211 + kfree(ctrl->edid); 1212 + ctrl->edid = NULL; 1213 + 1214 + mutex_destroy(&ctrl->dev_mutex); 1215 + } 1216 + 1217 + bool msm_edp_ctrl_panel_connected(struct edp_ctrl *ctrl) 1218 + { 1219 + mutex_lock(&ctrl->dev_mutex); 1220 + DBG("connect status = %d", ctrl->edp_connected); 1221 + if (ctrl->edp_connected) { 1222 + mutex_unlock(&ctrl->dev_mutex); 1223 + return true; 1224 + } 1225 + 1226 + if (!ctrl->power_on) { 1227 + edp_ctrl_phy_aux_enable(ctrl, 1); 1228 + edp_ctrl_irq_enable(ctrl, 1); 1229 + } 1230 + 1231 + if (drm_dp_dpcd_read(ctrl->drm_aux, DP_DPCD_REV, ctrl->dpcd, 1232 + DP_RECEIVER_CAP_SIZE) < DP_RECEIVER_CAP_SIZE) { 1233 + pr_err("%s: AUX channel is NOT ready\n", __func__); 1234 + memset(ctrl->dpcd, 0, DP_RECEIVER_CAP_SIZE); 1235 + } else { 1236 + ctrl->edp_connected = true; 1237 + } 1238 + 1239 
+ if (!ctrl->power_on) { 1240 + edp_ctrl_irq_enable(ctrl, 0); 1241 + edp_ctrl_phy_aux_enable(ctrl, 0); 1242 + } 1243 + 1244 + DBG("exit: connect status=%d", ctrl->edp_connected); 1245 + 1246 + mutex_unlock(&ctrl->dev_mutex); 1247 + 1248 + return ctrl->edp_connected; 1249 + } 1250 + 1251 + int msm_edp_ctrl_get_panel_info(struct edp_ctrl *ctrl, 1252 + struct drm_connector *connector, struct edid **edid) 1253 + { 1254 + int ret = 0; 1255 + 1256 + mutex_lock(&ctrl->dev_mutex); 1257 + 1258 + if (ctrl->edid) { 1259 + if (edid) { 1260 + DBG("Just return edid buffer"); 1261 + *edid = ctrl->edid; 1262 + } 1263 + goto unlock_ret; 1264 + } 1265 + 1266 + if (!ctrl->power_on) { 1267 + edp_ctrl_phy_aux_enable(ctrl, 1); 1268 + edp_ctrl_irq_enable(ctrl, 1); 1269 + } 1270 + 1271 + ret = drm_dp_link_probe(ctrl->drm_aux, &ctrl->dp_link); 1272 + if (ret) { 1273 + pr_err("%s: read dpcd cap failed, %d\n", __func__, ret); 1274 + goto disable_ret; 1275 + } 1276 + 1277 + /* Initialize link rate as panel max link rate */ 1278 + ctrl->link_rate = drm_dp_link_rate_to_bw_code(ctrl->dp_link.rate); 1279 + 1280 + ctrl->edid = drm_get_edid(connector, &ctrl->drm_aux->ddc); 1281 + if (!ctrl->edid) { 1282 + pr_err("%s: edid read fail\n", __func__); 1283 + goto disable_ret; 1284 + } 1285 + 1286 + if (edid) 1287 + *edid = ctrl->edid; 1288 + 1289 + disable_ret: 1290 + if (!ctrl->power_on) { 1291 + edp_ctrl_irq_enable(ctrl, 0); 1292 + edp_ctrl_phy_aux_enable(ctrl, 0); 1293 + } 1294 + unlock_ret: 1295 + mutex_unlock(&ctrl->dev_mutex); 1296 + return ret; 1297 + } 1298 + 1299 + int msm_edp_ctrl_timing_cfg(struct edp_ctrl *ctrl, 1300 + const struct drm_display_mode *mode, 1301 + const struct drm_display_info *info) 1302 + { 1303 + u32 hstart_from_sync, vstart_from_sync; 1304 + u32 data; 1305 + int ret = 0; 1306 + 1307 + mutex_lock(&ctrl->dev_mutex); 1308 + /* 1309 + * Need to keep color depth, pixel rate and 1310 + * interlaced information in ctrl context 1311 + */ 1312 + ctrl->color_depth = info->bpc; 1313 
+ ctrl->pixel_rate = mode->clock; 1314 + ctrl->interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE); 1315 + 1316 + /* Fill initial link config based on passed in timing */ 1317 + edp_fill_link_cfg(ctrl); 1318 + 1319 + if (edp_clk_enable(ctrl, EDP_CLK_MASK_AHB)) { 1320 + pr_err("%s, fail to prepare enable ahb clk\n", __func__); 1321 + ret = -EINVAL; 1322 + goto unlock_ret; 1323 + } 1324 + edp_clock_synchrous(ctrl, 1); 1325 + 1326 + /* Configure eDP timing to HW */ 1327 + edp_write(ctrl->base + REG_EDP_TOTAL_HOR_VER, 1328 + EDP_TOTAL_HOR_VER_HORIZ(mode->htotal) | 1329 + EDP_TOTAL_HOR_VER_VERT(mode->vtotal)); 1330 + 1331 + vstart_from_sync = mode->vtotal - mode->vsync_start; 1332 + hstart_from_sync = mode->htotal - mode->hsync_start; 1333 + edp_write(ctrl->base + REG_EDP_START_HOR_VER_FROM_SYNC, 1334 + EDP_START_HOR_VER_FROM_SYNC_HORIZ(hstart_from_sync) | 1335 + EDP_START_HOR_VER_FROM_SYNC_VERT(vstart_from_sync)); 1336 + 1337 + data = EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT( 1338 + mode->vsync_end - mode->vsync_start); 1339 + data |= EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ( 1340 + mode->hsync_end - mode->hsync_start); 1341 + if (mode->flags & DRM_MODE_FLAG_NVSYNC) 1342 + data |= EDP_HSYNC_VSYNC_WIDTH_POLARITY_NVSYNC; 1343 + if (mode->flags & DRM_MODE_FLAG_NHSYNC) 1344 + data |= EDP_HSYNC_VSYNC_WIDTH_POLARITY_NHSYNC; 1345 + edp_write(ctrl->base + REG_EDP_HSYNC_VSYNC_WIDTH_POLARITY, data); 1346 + 1347 + edp_write(ctrl->base + REG_EDP_ACTIVE_HOR_VER, 1348 + EDP_ACTIVE_HOR_VER_HORIZ(mode->hdisplay) | 1349 + EDP_ACTIVE_HOR_VER_VERT(mode->vdisplay)); 1350 + 1351 + edp_clk_disable(ctrl, EDP_CLK_MASK_AHB); 1352 + 1353 + unlock_ret: 1354 + mutex_unlock(&ctrl->dev_mutex); 1355 + return ret; 1356 + } 1357 + 1358 + bool msm_edp_ctrl_pixel_clock_valid(struct edp_ctrl *ctrl, 1359 + u32 pixel_rate, u32 *pm, u32 *pn) 1360 + { 1361 + const struct edp_pixel_clk_div *divs; 1362 + u32 err = 1; /* 1% error tolerance */ 1363 + u32 clk_err; 1364 + int i; 1365 + 1366 + if (ctrl->link_rate == 
DP_LINK_BW_1_62) { 1367 + divs = clk_divs[0]; 1368 + } else if (ctrl->link_rate == DP_LINK_BW_2_7) { 1369 + divs = clk_divs[1]; 1370 + } else { 1371 + pr_err("%s: Invalid link rate,%d\n", __func__, ctrl->link_rate); 1372 + return false; 1373 + } 1374 + 1375 + for (i = 0; i < EDP_PIXEL_CLK_NUM; i++) { 1376 + clk_err = abs(divs[i].rate - pixel_rate); 1377 + if ((divs[i].rate * err / 100) >= clk_err) { 1378 + if (pm) 1379 + *pm = divs[i].m; 1380 + if (pn) 1381 + *pn = divs[i].n; 1382 + return true; 1383 + } 1384 + } 1385 + 1386 + DBG("pixel clock %d(kHz) not supported", pixel_rate); 1387 + 1388 + return false; 1389 + } 1390 +
+106
drivers/gpu/drm/msm/edp/edp_phy.c
··· 1 + /* 2 + * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 and 6 + * only version 2 as published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope that it will be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + */ 13 + 14 + #include "edp.h" 15 + #include "edp.xml.h" 16 + 17 + #define EDP_MAX_LANE 4 18 + 19 + struct edp_phy { 20 + void __iomem *base; 21 + }; 22 + 23 + bool msm_edp_phy_ready(struct edp_phy *phy) 24 + { 25 + u32 status; 26 + int cnt = 100; 27 + 28 + while (--cnt) { 29 + status = edp_read(phy->base + 30 + REG_EDP_PHY_GLB_PHY_STATUS); 31 + if (status & 0x01) 32 + break; 33 + usleep_range(500, 1000); 34 + } 35 + 36 + if (cnt == 0) { 37 + pr_err("%s: PHY NOT ready\n", __func__); 38 + return false; 39 + } else { 40 + return true; 41 + } 42 + } 43 + 44 + void msm_edp_phy_ctrl(struct edp_phy *phy, int enable) 45 + { 46 + DBG("enable=%d", enable); 47 + if (enable) { 48 + /* Reset */ 49 + edp_write(phy->base + REG_EDP_PHY_CTRL, 50 + EDP_PHY_CTRL_SW_RESET | EDP_PHY_CTRL_SW_RESET_PLL); 51 + /* Make sure fully reset */ 52 + wmb(); 53 + usleep_range(500, 1000); 54 + edp_write(phy->base + REG_EDP_PHY_CTRL, 0x000); 55 + edp_write(phy->base + REG_EDP_PHY_GLB_PD_CTL, 0x3f); 56 + edp_write(phy->base + REG_EDP_PHY_GLB_CFG, 0x1); 57 + } else { 58 + edp_write(phy->base + REG_EDP_PHY_GLB_PD_CTL, 0xc0); 59 + } 60 + } 61 + 62 + /* voltage mode and pre emphasis cfg */ 63 + void msm_edp_phy_vm_pe_init(struct edp_phy *phy) 64 + { 65 + edp_write(phy->base + REG_EDP_PHY_GLB_VM_CFG0, 0x3); 66 + edp_write(phy->base + REG_EDP_PHY_GLB_VM_CFG1, 0x64); 67 + edp_write(phy->base + REG_EDP_PHY_GLB_MISC9, 0x6c); 68 + } 69 + 70 + 
void msm_edp_phy_vm_pe_cfg(struct edp_phy *phy, u32 v0, u32 v1) 71 + { 72 + edp_write(phy->base + REG_EDP_PHY_GLB_VM_CFG0, v0); 73 + edp_write(phy->base + REG_EDP_PHY_GLB_VM_CFG1, v1); 74 + } 75 + 76 + void msm_edp_phy_lane_power_ctrl(struct edp_phy *phy, bool up, u32 max_lane) 77 + { 78 + u32 i; 79 + u32 data; 80 + 81 + if (up) 82 + data = 0; /* power up */ 83 + else 84 + data = 0x7; /* power down */ 85 + 86 + for (i = 0; i < max_lane; i++) 87 + edp_write(phy->base + REG_EDP_PHY_LN_PD_CTL(i) , data); 88 + 89 + /* power down unused lane */ 90 + data = 0x7; /* power down */ 91 + for (i = max_lane; i < EDP_MAX_LANE; i++) 92 + edp_write(phy->base + REG_EDP_PHY_LN_PD_CTL(i) , data); 93 + } 94 + 95 + void *msm_edp_phy_init(struct device *dev, void __iomem *regbase) 96 + { 97 + struct edp_phy *phy = NULL; 98 + 99 + phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL); 100 + if (!phy) 101 + return NULL; 102 + 103 + phy->base = regbase; 104 + return phy; 105 + } 106 +
+6
drivers/gpu/drm/msm/msm_drv.h
··· 76 76 */ 77 77 struct hdmi *hdmi; 78 78 79 + /* eDP is for mdp5 only, but kms has not been created 80 + * when edp_bind() and edp_init() are called. Here is the only 81 + * place to keep the edp instance. 82 + */ 83 + struct msm_edp *edp; 84 + 79 85 /* when we have more than one 'msm_gpu' these need to be an array: */ 80 86 struct msm_gpu *gpu; 81 87 struct msm_file_private *lastctx;