// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/mmc/host/sdhci-msm.c - Qualcomm SDHCI Platform driver
 *
 * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/mmc/mmc.h>
#include <linux/pm_runtime.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/iopoll.h>
#include <linux/regulator/consumer.h>

#include "sdhci-pltfm.h"
#include "cqhci.h"

#define CORE_MCI_VERSION 0x50
#define CORE_VERSION_MAJOR_SHIFT 28
#define CORE_VERSION_MAJOR_MASK (0xf << CORE_VERSION_MAJOR_SHIFT)
#define CORE_VERSION_MINOR_MASK 0xff

#define CORE_MCI_GENERICS 0x70
#define SWITCHABLE_SIGNALING_VOLTAGE BIT(29)

#define HC_MODE_EN 0x1
#define CORE_POWER 0x0
#define CORE_SW_RST BIT(7)
#define FF_CLK_SW_RST_DIS BIT(13)

#define CORE_PWRCTL_BUS_OFF BIT(0)
#define CORE_PWRCTL_BUS_ON BIT(1)
#define CORE_PWRCTL_IO_LOW BIT(2)
#define CORE_PWRCTL_IO_HIGH BIT(3)
#define CORE_PWRCTL_BUS_SUCCESS BIT(0)
#define CORE_PWRCTL_IO_SUCCESS BIT(2)
#define REQ_BUS_OFF BIT(0)
#define REQ_BUS_ON BIT(1)
#define REQ_IO_LOW BIT(2)
#define REQ_IO_HIGH BIT(3)
#define INT_MASK 0xf
#define MAX_PHASES 16
#define CORE_DLL_LOCK BIT(7)
#define CORE_DDR_DLL_LOCK BIT(11)
#define CORE_DLL_EN BIT(16)
#define CORE_CDR_EN BIT(17)
#define CORE_CK_OUT_EN BIT(18)
#define CORE_CDR_EXT_EN BIT(19)
#define CORE_DLL_PDN BIT(29)
#define CORE_DLL_RST BIT(30)
#define CORE_CMD_DAT_TRACK_SEL BIT(0)

#define CORE_DDR_CAL_EN BIT(0)
#define CORE_FLL_CYCLE_CNT BIT(18)
#define CORE_DLL_CLOCK_DISABLE BIT(21)

#define DLL_USR_CTL_POR_VAL 0x10800
#define ENABLE_DLL_LOCK_STATUS BIT(26)
#define FINE_TUNE_MODE_EN BIT(27)
#define BIAS_OK_SIGNAL BIT(29)

#define DLL_CONFIG_3_LOW_FREQ_VAL 0x08
#define DLL_CONFIG_3_HIGH_FREQ_VAL 0x10

#define CORE_VENDOR_SPEC_POR_VAL 0xa9c
#define CORE_CLK_PWRSAVE BIT(1)
#define CORE_HC_MCLK_SEL_DFLT (2 << 8)
#define CORE_HC_MCLK_SEL_HS400 (3 << 8)
#define CORE_HC_MCLK_SEL_MASK (3 << 8)
#define CORE_IO_PAD_PWR_SWITCH_EN BIT(15)
#define CORE_IO_PAD_PWR_SWITCH BIT(16)
#define CORE_HC_SELECT_IN_EN BIT(18)
#define CORE_HC_SELECT_IN_HS400 (6 << 19)
#define CORE_HC_SELECT_IN_MASK (7 << 19)

#define CORE_3_0V_SUPPORT BIT(25)
#define CORE_1_8V_SUPPORT BIT(26)
#define CORE_VOLT_SUPPORT (CORE_3_0V_SUPPORT | CORE_1_8V_SUPPORT)

#define CORE_CSR_CDC_CTLR_CFG0 0x130
#define CORE_SW_TRIG_FULL_CALIB BIT(16)
#define CORE_HW_AUTOCAL_ENA BIT(17)

#define CORE_CSR_CDC_CTLR_CFG1 0x134
#define CORE_CSR_CDC_CAL_TIMER_CFG0 0x138
#define CORE_TIMER_ENA BIT(16)

#define CORE_CSR_CDC_CAL_TIMER_CFG1 0x13C
#define CORE_CSR_CDC_REFCOUNT_CFG 0x140
#define CORE_CSR_CDC_COARSE_CAL_CFG 0x144
#define CORE_CDC_OFFSET_CFG 0x14C
#define CORE_CSR_CDC_DELAY_CFG 0x150
#define CORE_CDC_SLAVE_DDA_CFG 0x160
#define CORE_CSR_CDC_STATUS0 0x164
#define CORE_CALIBRATION_DONE BIT(0)

#define CORE_CDC_ERROR_CODE_MASK 0x7000000

#define CORE_CSR_CDC_GEN_CFG 0x178
#define CORE_CDC_SWITCH_BYPASS_OFF BIT(0)
#define CORE_CDC_SWITCH_RC_EN BIT(1)

#define CORE_CDC_T4_DLY_SEL BIT(0)
#define CORE_CMDIN_RCLK_EN BIT(1)
#define CORE_START_CDC_TRAFFIC BIT(6)

#define CORE_PWRSAVE_DLL BIT(3)

#define DDR_CONFIG_POR_VAL 0x80040873

#define INVALID_TUNING_PHASE -1
#define SDHCI_MSM_MIN_CLOCK 400000
#define CORE_FREQ_100MHZ (100 * 1000 * 1000)

#define CDR_SELEXT_SHIFT 20
#define CDR_SELEXT_MASK (0xf << CDR_SELEXT_SHIFT)
#define CMUX_SHIFT_PHASE_SHIFT 24
#define CMUX_SHIFT_PHASE_MASK (7 << CMUX_SHIFT_PHASE_SHIFT)

#define MSM_MMC_AUTOSUSPEND_DELAY_MS 50

/* Timeout value to avoid infinite waiting for pwr_irq */
#define MSM_PWR_IRQ_TIMEOUT_MS 5000

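/*
 * Register accesses go through the variant ops, so the same code paths
 * work for both the legacy (MCI) and V5+ register layouts; see
 * sdhci_msm_mci_variant_readl_relaxed() and its V5 counterpart below.
 */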
#define msm_host_readl(msm_host, host, offset) \
	msm_host->var_ops->msm_readl_relaxed(host, offset)

#define msm_host_writel(msm_host, val, host, offset) \
	msm_host->var_ops->msm_writel_relaxed(val, host, offset)

/* CQHCI vendor specific registers */
#define CQHCI_VENDOR_CFG1 0xA00
#define CQHCI_VENDOR_DIS_RST_ON_CQ_EN (0x3 << 13)

struct sdhci_msm_offset {
	u32 core_hc_mode;
	u32 core_mci_data_cnt;
	u32 core_mci_status;
	u32 core_mci_fifo_cnt;
	u32 core_mci_version;
	u32 core_generics;
	u32 core_testbus_config;
	u32 core_testbus_sel2_bit;
	u32 core_testbus_ena;
	u32 core_testbus_sel2;
	u32 core_pwrctl_status;
	u32 core_pwrctl_mask;
	u32 core_pwrctl_clear;
	u32 core_pwrctl_ctl;
	u32 core_sdcc_debug_reg;
	u32 core_dll_config;
	u32 core_dll_status;
	u32 core_vendor_spec;
	u32 core_vendor_spec_adma_err_addr0;
	u32 core_vendor_spec_adma_err_addr1;
	u32 core_vendor_spec_func2;
	u32 core_vendor_spec_capabilities0;
	u32 core_ddr_200_cfg;
	u32 core_vendor_spec3;
	u32 core_dll_config_2;
	u32 core_dll_config_3;
	u32 core_ddr_config_old; /* Applicable to sdcc minor ver < 0x49 */
	u32 core_ddr_config;
	u32 core_dll_usr_ctl; /* Present on SDCC5.1 onwards */
};

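/*
 * V5+ controllers: the core registers live in the main SDHCI register
 * space and are accessed via host->ioaddr.
 */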
static const struct sdhci_msm_offset sdhci_msm_v5_offset = {
	.core_mci_data_cnt = 0x35c,
	.core_mci_status = 0x324,
	.core_mci_fifo_cnt = 0x308,
	.core_mci_version = 0x318,
	.core_generics = 0x320,
	.core_testbus_config = 0x32c,
	.core_testbus_sel2_bit = 3,
	.core_testbus_ena = (1 << 31),
	.core_testbus_sel2 = (1 << 3),
	.core_pwrctl_status = 0x240,
	.core_pwrctl_mask = 0x244,
	.core_pwrctl_clear = 0x248,
	.core_pwrctl_ctl = 0x24c,
	.core_sdcc_debug_reg = 0x358,
	.core_dll_config = 0x200,
	.core_dll_status = 0x208,
	.core_vendor_spec = 0x20c,
	.core_vendor_spec_adma_err_addr0 = 0x214,
	.core_vendor_spec_adma_err_addr1 = 0x218,
	.core_vendor_spec_func2 = 0x210,
	.core_vendor_spec_capabilities0 = 0x21c,
	.core_ddr_200_cfg = 0x224,
	.core_vendor_spec3 = 0x250,
	.core_dll_config_2 = 0x254,
	.core_dll_config_3 = 0x258,
	.core_ddr_config = 0x25c,
	.core_dll_usr_ctl = 0x388,
};

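/*
 * Legacy (pre-V5) controllers: the core registers live in the separate
 * MCI region and are accessed via msm_host->core_mem.
 */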
static const struct sdhci_msm_offset sdhci_msm_mci_offset = {
	.core_hc_mode = 0x78,
	.core_mci_data_cnt = 0x30,
	.core_mci_status = 0x34,
	.core_mci_fifo_cnt = 0x44,
	.core_mci_version = 0x050,
	.core_generics = 0x70,
	.core_testbus_config = 0x0cc,
	.core_testbus_sel2_bit = 4,
	.core_testbus_ena = (1 << 3),
	.core_testbus_sel2 = (1 << 4),
	.core_pwrctl_status = 0xdc,
	.core_pwrctl_mask = 0xe0,
	.core_pwrctl_clear = 0xe4,
	.core_pwrctl_ctl = 0xe8,
	.core_sdcc_debug_reg = 0x124,
	.core_dll_config = 0x100,
	.core_dll_status = 0x108,
	.core_vendor_spec = 0x10c,
	.core_vendor_spec_adma_err_addr0 = 0x114,
	.core_vendor_spec_adma_err_addr1 = 0x118,
	.core_vendor_spec_func2 = 0x110,
	.core_vendor_spec_capabilities0 = 0x11c,
	.core_ddr_200_cfg = 0x184,
	.core_vendor_spec3 = 0x1b0,
	.core_dll_config_2 = 0x1b4,
	.core_ddr_config_old = 0x1b8,
	.core_ddr_config = 0x1bc,
};

struct sdhci_msm_variant_ops {
	u32 (*msm_readl_relaxed)(struct sdhci_host *host, u32 offset);
	void (*msm_writel_relaxed)(u32 val, struct sdhci_host *host,
			u32 offset);
};

/*
 * From V5 onwards, the register space has changed. Wrap this info in a
 * structure and choose the right data structure based on the version info
 * mentioned in DT.
 */
struct sdhci_msm_variant_info {
	bool mci_removed;
	bool restore_dll_config;
	bool uses_tassadar_dll;
	const struct sdhci_msm_variant_ops *var_ops;
	const struct sdhci_msm_offset *offset;
};

struct sdhci_msm_host {
	struct platform_device *pdev;
	void __iomem *core_mem;	/* MSM SDCC mapped address */
	int pwr_irq;		/* power irq */
	struct clk *bus_clk;	/* SDHC bus voter clock */
	struct clk *xo_clk;	/* TCXO clk needed for FLL feature of cm_dll */
	struct clk_bulk_data bulk_clks[4]; /* core, iface, cal, sleep clocks */
	unsigned long clk_rate;
	struct mmc_host *mmc;
	struct opp_table *opp_table;
	bool has_opp_table;
	bool use_14lpp_dll_reset;
	bool tuning_done;
	bool calibration_done;
	u8 saved_tuning_phase;
	bool use_cdclp533;
	u32 curr_pwr_state;
	u32 curr_io_level;
	wait_queue_head_t pwr_irq_wait;
	bool pwr_irq_flag;
	u32 caps_0;
	bool mci_removed;
	bool restore_dll_config;
	const struct sdhci_msm_variant_ops *var_ops;
	const struct sdhci_msm_offset *offset;
	bool use_cdr;
	u32 transfer_mode;
	bool updated_ddr_cfg;
	bool uses_tassadar_dll;
	u32 dll_config;
	u32 ddr_config;
};

static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	return msm_host->offset;
}

/*
 * APIs to read/write to vendor specific registers which were there in the
 * core_mem region before MCI was removed.
 */
static u32 sdhci_msm_mci_variant_readl_relaxed(struct sdhci_host *host,
		u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	return readl_relaxed(msm_host->core_mem + offset);
}

static u32 sdhci_msm_v5_variant_readl_relaxed(struct sdhci_host *host,
		u32 offset)
{
	return readl_relaxed(host->ioaddr + offset);
}

static void sdhci_msm_mci_variant_writel_relaxed(u32 val,
		struct sdhci_host *host, u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	writel_relaxed(val, msm_host->core_mem + offset);
}

static void sdhci_msm_v5_variant_writel_relaxed(u32 val,
		struct sdhci_host *host, u32 offset)
{
	writel_relaxed(val, host->ioaddr + offset);
}

static unsigned int msm_get_clock_rate_for_bus_mode(struct sdhci_host *host,
						    unsigned int clock)
{
	struct mmc_ios ios = host->mmc->ios;
	/*
	 * The SDHC requires the internal clock frequency to be double the
	 * actual clock that will be set for DDR mode. The controller
	 * uses the faster clock (100/400 MHz) for some of its parts and
	 * sends the actual required clock (50/200 MHz) to the card.
	 */
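	/*
	 * e.g. HS400 with a 200 MHz card clock requires the controller
	 * clock to be programmed to 400 MHz.
	 */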
	if (ios.timing == MMC_TIMING_UHS_DDR50 ||
	    ios.timing == MMC_TIMING_MMC_DDR52 ||
	    ios.timing == MMC_TIMING_MMC_HS400 ||
	    host->flags & SDHCI_HS400_TUNING)
		clock *= 2;
	return clock;
}

static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host,
					    unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct mmc_ios curr_ios = host->mmc->ios;
	struct clk *core_clk = msm_host->bulk_clks[0].clk;
	int rc;

	clock = msm_get_clock_rate_for_bus_mode(host, clock);
	rc = dev_pm_opp_set_rate(mmc_dev(host->mmc), clock);
	if (rc) {
		pr_err("%s: Failed to set clock at rate %u at timing %d\n",
		       mmc_hostname(host->mmc), clock,
		       curr_ios.timing);
		return;
	}
	msm_host->clk_rate = clock;
	pr_debug("%s: Setting clock at rate %lu at timing %d\n",
		 mmc_hostname(host->mmc), clk_get_rate(core_clk),
		 curr_ios.timing);
}

/* Platform specific tuning */
static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host, u8 poll)
{
	u32 wait_cnt = 50;
	u8 ck_out_en;
	struct mmc_host *mmc = host->mmc;
	const struct sdhci_msm_offset *msm_offset =
			sdhci_priv_msm_offset(host);

	/* Poll for CK_OUT_EN bit. max. poll time = 50us */
	ck_out_en = !!(readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config) & CORE_CK_OUT_EN);

	while (ck_out_en != poll) {
		if (--wait_cnt == 0) {
			dev_err(mmc_dev(mmc), "%s: CK_OUT_EN bit is not %d\n",
				mmc_hostname(mmc), poll);
			return -ETIMEDOUT;
		}
		udelay(1);

		ck_out_en = !!(readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config) & CORE_CK_OUT_EN);
	}

	return 0;
}

static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
{
	int rc;
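	/*
	 * Note: the table maps each phase (0..15) to its Gray-coded value,
	 * so successive phase selections change only one bit of the
	 * CDR_SELEXT field, which helps avoid transient glitches while the
	 * selector is being updated.
	 */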
	static const u8 grey_coded_phase_table[] = {
		0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
		0xc, 0xd, 0xf, 0xe, 0xa, 0xb, 0x9, 0x8
	};
	unsigned long flags;
	u32 config;
	struct mmc_host *mmc = host->mmc;
	const struct sdhci_msm_offset *msm_offset =
			sdhci_priv_msm_offset(host);

	if (phase > 0xf)
		return -EINVAL;

	spin_lock_irqsave(&host->lock, flags);

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
	config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
	rc = msm_dll_poll_ck_out_en(host, 0);
	if (rc)
		goto err_out;

	/*
	 * Write the selected DLL clock output phase (0 ... 15)
	 * to CDR_SELEXT bit field of DLL_CONFIG register.
	 */
	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config &= ~CDR_SELEXT_MASK;
	config |= grey_coded_phase_table[phase] << CDR_SELEXT_SHIFT;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config |= CORE_CK_OUT_EN;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
	rc = msm_dll_poll_ck_out_en(host, 1);
	if (rc)
		goto err_out;

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config |= CORE_CDR_EN;
	config &= ~CORE_CDR_EXT_EN;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
	goto out;

err_out:
	dev_err(mmc_dev(mmc), "%s: Failed to set DLL phase: %d\n",
		mmc_hostname(mmc), phase);
out:
	spin_unlock_irqrestore(&host->lock, flags);
	return rc;
}

/*
 * Find out the greatest range of consecutive selected
 * DLL clock output phases that can be used as the sampling
 * setting for SD3.0 UHS-I card read operation (in SDR104
 * timing mode) or for eMMC4.5 card read operation (in
 * HS400/HS200 timing mode).
 * Select the phase 3/4 of the way through the range and
 * configure the DLL with this selected DLL clock output phase.
 */

static int msm_find_most_appropriate_phase(struct sdhci_host *host,
					   u8 *phase_table, u8 total_phases)
{
	int ret;
	u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
	u8 phases_per_row[MAX_PHASES] = { 0 };
	int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
	int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
	bool phase_0_found = false, phase_15_found = false;
	struct mmc_host *mmc = host->mmc;

	if (!total_phases || (total_phases > MAX_PHASES)) {
		dev_err(mmc_dev(mmc), "%s: Invalid argument: total_phases=%d\n",
			mmc_hostname(mmc), total_phases);
		return -EINVAL;
	}

	for (cnt = 0; cnt < total_phases; cnt++) {
		ranges[row_index][col_index] = phase_table[cnt];
		phases_per_row[row_index] += 1;
		col_index++;

		if ((cnt + 1) == total_phases) {
			continue;
		/* check if the next phase in phase_table is consecutive or not */
		} else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
			row_index++;
			col_index = 0;
		}
	}

	if (row_index >= MAX_PHASES)
		return -EINVAL;

	/* Check if phase-0 is present in the first valid window */
	if (!ranges[0][0]) {
		phase_0_found = true;
		phase_0_raw_index = 0;
		/* Check if a cycle exists between two valid windows */
		for (cnt = 1; cnt <= row_index; cnt++) {
			if (phases_per_row[cnt]) {
				for (i = 0; i < phases_per_row[cnt]; i++) {
					if (ranges[cnt][i] == 15) {
						phase_15_found = true;
						phase_15_raw_index = cnt;
						break;
					}
				}
			}
		}
	}

	/* If two valid windows form a cycle, merge them into a single window */
	if (phase_0_found && phase_15_found) {
		/* number of phases in the row where phase 0 is present */
		u8 phases_0 = phases_per_row[phase_0_raw_index];
		/* number of phases in the row where phase 15 is present */
		u8 phases_15 = phases_per_row[phase_15_raw_index];

		if (phases_0 + phases_15 >= MAX_PHASES)
			/*
			 * If there is more than one phase window, the total
			 * number of phases in both windows should not be
			 * more than or equal to MAX_PHASES.
			 */
			return -EINVAL;

		/* Merge the two cyclic windows */
		i = phases_15;
		for (cnt = 0; cnt < phases_0; cnt++) {
			ranges[phase_15_raw_index][i] =
				ranges[phase_0_raw_index][cnt];
			if (++i >= MAX_PHASES)
				break;
		}

		phases_per_row[phase_0_raw_index] = 0;
		phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
	}

	for (cnt = 0; cnt <= row_index; cnt++) {
		if (phases_per_row[cnt] > curr_max) {
			curr_max = phases_per_row[cnt];
			selected_row_index = cnt;
		}
	}

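	/*
	 * Pick the phase 3/4 of the way into the widest window; e.g. an
	 * 8-phase window selects its 6th entry (index 5).
	 */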
	i = (curr_max * 3) / 4;
	if (i)
		i--;

	ret = ranges[selected_row_index][i];

	if (ret >= MAX_PHASES) {
		ret = -EINVAL;
		dev_err(mmc_dev(mmc), "%s: Invalid phase selected=%d\n",
			mmc_hostname(mmc), ret);
	}

	return ret;
}

static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
{
	u32 mclk_freq = 0, config;
	const struct sdhci_msm_offset *msm_offset =
			sdhci_priv_msm_offset(host);

	/* Program the MCLK value to MCLK_FREQ bit field */
	if (host->clock <= 112000000)
		mclk_freq = 0;
	else if (host->clock <= 125000000)
		mclk_freq = 1;
	else if (host->clock <= 137000000)
		mclk_freq = 2;
	else if (host->clock <= 150000000)
		mclk_freq = 3;
	else if (host->clock <= 162000000)
		mclk_freq = 4;
	else if (host->clock <= 175000000)
		mclk_freq = 5;
	else if (host->clock <= 187000000)
		mclk_freq = 6;
	else if (host->clock <= 200000000)
		mclk_freq = 7;

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config &= ~CMUX_SHIFT_PHASE_MASK;
	config |= mclk_freq << CMUX_SHIFT_PHASE_SHIFT;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
}

/* Initialize the DLL (Programmable Delay Line) */
static int msm_init_cm_dll(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int wait_cnt = 50;
	unsigned long flags, xo_clk = 0;
	u32 config;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	if (msm_host->use_14lpp_dll_reset && !IS_ERR_OR_NULL(msm_host->xo_clk))
		xo_clk = clk_get_rate(msm_host->xo_clk);

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * Make sure that clock is always enabled when DLL
	 * tuning is in progress. Keeping PWRSAVE ON may
	 * turn off the clock.
	 */
	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
	config &= ~CORE_CLK_PWRSAVE;
	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

	config = msm_host->dll_config;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

	if (msm_host->use_14lpp_dll_reset) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config);
		config &= ~CORE_CK_OUT_EN;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config);

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_2);
		config |= CORE_DLL_CLOCK_DISABLE;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config_2);
	}

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config |= CORE_DLL_RST;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config |= CORE_DLL_PDN;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	if (!msm_host->dll_config)
		msm_cm_dll_set_freq(host);

	if (msm_host->use_14lpp_dll_reset &&
	    !IS_ERR_OR_NULL(msm_host->xo_clk)) {
		u32 mclk_freq = 0;

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_2);
		config &= CORE_FLL_CYCLE_CNT;
		if (config)
			mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 8),
					xo_clk);
		else
			mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 4),
					xo_clk);

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_2);
		config &= ~(0xFF << 10);
		config |= mclk_freq << 10;

		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config_2);
		/* wait for 5us before enabling DLL clock */
		udelay(5);
	}

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config &= ~CORE_DLL_RST;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config &= ~CORE_DLL_PDN;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	if (msm_host->use_14lpp_dll_reset) {
		if (!msm_host->dll_config)
			msm_cm_dll_set_freq(host);
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_2);
		config &= ~CORE_DLL_CLOCK_DISABLE;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config_2);
	}

	/*
	 * Configure DLL user control register to enable DLL status.
	 * This setting is applicable to SDCC v5.1 onwards only.
	 */
	if (msm_host->uses_tassadar_dll) {
		config = DLL_USR_CTL_POR_VAL | FINE_TUNE_MODE_EN |
			ENABLE_DLL_LOCK_STATUS | BIAS_OK_SIGNAL;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_usr_ctl);

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_3);
		config &= ~0xFF;
		if (msm_host->clk_rate < 150000000)
			config |= DLL_CONFIG_3_LOW_FREQ_VAL;
		else
			config |= DLL_CONFIG_3_HIGH_FREQ_VAL;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config_3);
	}

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config |= CORE_DLL_EN;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config |= CORE_CK_OUT_EN;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
	while (!(readl_relaxed(host->ioaddr + msm_offset->core_dll_status) &
		 CORE_DLL_LOCK)) {
		/* max. wait of 50us for the LOCK bit to be set */
		if (--wait_cnt == 0) {
			dev_err(mmc_dev(mmc), "%s: DLL failed to LOCK\n",
				mmc_hostname(mmc));
			spin_unlock_irqrestore(&host->lock, flags);
			return -ETIMEDOUT;
		}
		udelay(1);
	}

	spin_unlock_irqrestore(&host->lock, flags);
	return 0;
}

static void msm_hc_select_default(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 config;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	if (!msm_host->use_cdclp533) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec3);
		config &= ~CORE_PWRSAVE_DLL;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_vendor_spec3);
	}

	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
	config &= ~CORE_HC_MCLK_SEL_MASK;
	config |= CORE_HC_MCLK_SEL_DFLT;
	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

	/*
	 * Disable HC_SELECT_IN to be able to use the UHS mode select
	 * configuration from Host Control2 register for all other
	 * modes.
	 * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
	 * in VENDOR_SPEC_FUNC
	 */
	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
	config &= ~CORE_HC_SELECT_IN_EN;
	config &= ~CORE_HC_SELECT_IN_MASK;
	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

	/*
	 * Make sure above writes impacting free running MCLK are completed
	 * before changing the clk_rate at GCC.
	 */
	wmb();
}

static void msm_hc_select_hs400(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct mmc_ios ios = host->mmc->ios;
	u32 config, dll_lock;
	int rc;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	/* Select the divided clock (free running MCLK/2) */
	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
	config &= ~CORE_HC_MCLK_SEL_MASK;
	config |= CORE_HC_MCLK_SEL_HS400;

	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);
	/*
	 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
	 * register
	 */
	if ((msm_host->tuning_done || ios.enhanced_strobe) &&
	    !msm_host->calibration_done) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec);
		config |= CORE_HC_SELECT_IN_HS400;
		config |= CORE_HC_SELECT_IN_EN;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_vendor_spec);
	}
	if (!msm_host->clk_rate && !msm_host->use_cdclp533) {
		/*
		 * Poll on DLL_LOCK or DDR_DLL_LOCK bits in
		 * core_dll_status to be set. This should get set
		 * within 15 us at 200 MHz.
		 */
		rc = readl_relaxed_poll_timeout(host->ioaddr +
						msm_offset->core_dll_status,
						dll_lock,
						(dll_lock &
						(CORE_DLL_LOCK |
						CORE_DDR_DLL_LOCK)), 10,
						1000);
		if (rc == -ETIMEDOUT)
			pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
			       mmc_hostname(host->mmc), dll_lock);
	}
	/*
	 * Make sure above writes impacting free running MCLK are completed
	 * before changing the clk_rate at GCC.
	 */
	wmb();
}

/*
 * sdhci_msm_hc_select_mode :- In general all timing modes are
 * controlled via UHS mode select in Host Control2 register.
 * eMMC specific HS200/HS400 don't have their respective modes
 * defined here, hence we use these values:
 *
 * HS200 - SDR104 (since they are equivalent in functionality)
 * HS400 - This involves multiple configurations
 *		Initially SDR104 - when tuning is required as HS200
 *		Then when switching to DDR @ 400MHz (HS400) we use
 *		the vendor specific HC_SELECT_IN to control the mode.
 *
 * In addition to controlling the modes we also need to select the
 * correct input clock for DLL depending on the mode:
 *
 * HS400 - divided clock (free running MCLK/2)
 * All other modes - default (free running MCLK)
 */
static void sdhci_msm_hc_select_mode(struct sdhci_host *host)
{
	struct mmc_ios ios = host->mmc->ios;

	if (ios.timing == MMC_TIMING_MMC_HS400 ||
	    host->flags & SDHCI_HS400_TUNING)
		msm_hc_select_hs400(host);
	else
		msm_hc_select_default(host);
}

static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 config, calib_done;
	int ret;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	/* Set the selected phase in delay line hw block */
	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
	if (ret)
		goto out;

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config |= CORE_CMD_DAT_TRACK_SEL;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
	config &= ~CORE_CDC_T4_DLY_SEL;
	writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
	config &= ~CORE_CDC_SWITCH_BYPASS_OFF;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
	config |= CORE_CDC_SWITCH_RC_EN;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
	config &= ~CORE_START_CDC_TRAFFIC;
	writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);

	/* Perform CDC Register Initialization Sequence */

	writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
	writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
	writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
	writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
	writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
	writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
	writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);

	/* CDC HW Calibration */

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	config |= CORE_SW_TRIG_FULL_CALIB;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	config &= ~CORE_SW_TRIG_FULL_CALIB;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	config |= CORE_HW_AUTOCAL_ENA;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	config |= CORE_TIMER_ENA;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);

	ret = readl_relaxed_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
					 calib_done,
					 (calib_done & CORE_CALIBRATION_DONE),
					 1, 50);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CDC calibration was not completed\n",
		       mmc_hostname(host->mmc), __func__);
		goto out;
	}

	ret = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
			& CORE_CDC_ERROR_CODE_MASK;
	if (ret) {
		pr_err("%s: %s: CDC error code %d\n",
		       mmc_hostname(host->mmc), __func__, ret);
		ret = -EINVAL;
		goto out;
	}

	config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
	config |= CORE_START_CDC_TRAFFIC;
	writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);
out:
	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
		 __func__, ret);
	return ret;
}

static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	u32 dll_status, config, ddr_cfg_offset;
	int ret;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_msm_offset *msm_offset =
					sdhci_priv_msm_offset(host);

	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Currently the core_ddr_config register defaults to the desired
	 * configuration on reset. Reprogram the power-on reset (POR) value
	 * anyway in case it has been modified by a bootloader. In the
	 * future, if this changes, the desired values will need to be
	 * programmed appropriately.
	 */
	if (msm_host->updated_ddr_cfg)
		ddr_cfg_offset = msm_offset->core_ddr_config;
	else
		ddr_cfg_offset = msm_offset->core_ddr_config_old;
	writel_relaxed(msm_host->ddr_config, host->ioaddr + ddr_cfg_offset);

	if (mmc->ios.enhanced_strobe) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_ddr_200_cfg);
		config |= CORE_CMDIN_RCLK_EN;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_ddr_200_cfg);
	}

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config_2);
	config |= CORE_DDR_CAL_EN;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config_2);

	ret = readl_relaxed_poll_timeout(host->ioaddr +
					 msm_offset->core_dll_status,
					 dll_status,
					 (dll_status & CORE_DDR_DLL_LOCK),
					 10, 1000);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CM_DLL_SDC4 calibration was not completed\n",
		       mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/*
	 * Set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3.
	 * When MCLK is gated OFF, it is not gated for less than 0.5us
	 * and MCLK must be switched on for at-least 1us before DATA
	 * starts coming. Controllers with 14lpp and later tech DLL cannot
	 * guarantee the above requirement. So PWRSAVE_DLL should not be
	 * turned on for host controllers using this DLL.
	 */
	if (!msm_host->use_14lpp_dll_reset) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec3);
		config |= CORE_PWRSAVE_DLL;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_vendor_spec3);
	}

	/*
	 * Drain the writebuffer to ensure the above DLL calibration is
	 * complete and PWRSAVE DLL is enabled.
	 */
	wmb();
out:
	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
		 __func__, ret);
	return ret;
}

static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct mmc_host *mmc = host->mmc;
	int ret;
	u32 config;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	if (!mmc->ios.enhanced_strobe) {
		/* Set the selected phase in delay line hw block */
		ret = msm_config_cm_dll_phase(host,
					      msm_host->saved_tuning_phase);
		if (ret)
			goto out;
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config);
		config |= CORE_CMD_DAT_TRACK_SEL;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config);
	}

	if (msm_host->use_cdclp533)
		ret = sdhci_msm_cdclp533_calibration(host);
	else
		ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
		 __func__, ret);
	return ret;
}

static bool sdhci_msm_is_tuning_needed(struct sdhci_host *host)
{
	struct mmc_ios *ios = &host->mmc->ios;

	/*
	 * Tuning is required only for SDR104, HS200 and HS400 modes, and
	 * only when the clock frequency is greater than 100MHz.
	 */
	if (host->clock <= CORE_FREQ_100MHZ ||
	    !(ios->timing == MMC_TIMING_MMC_HS400 ||
	      ios->timing == MMC_TIMING_MMC_HS200 ||
	      ios->timing == MMC_TIMING_UHS_SDR104) ||
	    ios->enhanced_strobe)
		return false;

	return true;
}

static int sdhci_msm_restore_sdr_dll_config(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int ret;

	/*
	 * The SDR DLL comes into the picture only for timing modes which
	 * need tuning.
	 */
	if (!sdhci_msm_is_tuning_needed(host))
		return 0;

	/* Reset the tuning block */
	ret = msm_init_cm_dll(host);
	if (ret)
		return ret;

	/* Restore the tuning block */
	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);

	return ret;
}

static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable)
{
	const struct sdhci_msm_offset *msm_offset = sdhci_priv_msm_offset(host);
	u32 config, oldconfig = readl_relaxed(host->ioaddr +
					      msm_offset->core_dll_config);

	config = oldconfig;
	if (enable) {
		config |= CORE_CDR_EN;
		config &= ~CORE_CDR_EXT_EN;
	} else {
		config &= ~CORE_CDR_EN;
		config |= CORE_CDR_EXT_EN;
	}

	if (config != oldconfig) {
		writel_relaxed(config, host->ioaddr +
			       msm_offset->core_dll_config);
	}
}

static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int tuning_seq_cnt = 3;
	u8 phase, tuned_phases[16], tuned_phase_cnt = 0;
	int rc;
	struct mmc_ios ios = host->mmc->ios;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	if (!sdhci_msm_is_tuning_needed(host)) {
		msm_host->use_cdr = false;
		sdhci_msm_set_cdr(host, false);
		return 0;
	}

	/* Clock-Data-Recovery used to dynamically adjust RX sampling point */
	msm_host->use_cdr = true;

	/*
	 * Clear tuning_done flag before tuning to ensure proper
	 * HS400 settings.
	 */
	msm_host->tuning_done = 0;

	/*
	 * HS400 tuning is done in HS200 timing and requires:
	 * - selecting MCLK/2 in VENDOR_SPEC
	 * - programming MCLK to 400MHz (or nearest supported) in GCC
	 */
	if (host->flags & SDHCI_HS400_TUNING) {
		sdhci_msm_hc_select_mode(host);
		msm_set_clock_rate_for_bus_mode(host, ios.clock);
		host->flags &= ~SDHCI_HS400_TUNING;
	}

retry:
	/* First of all reset the tuning block */
	rc = msm_init_cm_dll(host);
	if (rc)
		return rc;

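	/*
	 * Walk all 16 DLL output phases, issuing a tuning command at each
	 * one and recording the phases at which it succeeds.
	 */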
	phase = 0;
	do {
		/* Set the phase in delay line hw block */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			return rc;

		rc = mmc_send_tuning(mmc, opcode, NULL);
		if (!rc) {
			/* Tuning is successful at this tuning point */
			tuned_phases[tuned_phase_cnt++] = phase;
			dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n",
				mmc_hostname(mmc), phase);
		}
	} while (++phase < ARRAY_SIZE(tuned_phases));

	if (tuned_phase_cnt) {
		rc = msm_find_most_appropriate_phase(host, tuned_phases,
						     tuned_phase_cnt);
		if (rc < 0)
			return rc;
		else
			phase = rc;

		/*
		 * Finally set the selected phase in delay
		 * line hw block.
		 */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			return rc;
		msm_host->saved_tuning_phase = phase;
		dev_dbg(mmc_dev(mmc), "%s: Setting the tuning phase to %d\n",
			mmc_hostname(mmc), phase);
	} else {
		if (--tuning_seq_cnt)
			goto retry;
		/* Tuning failed */
		dev_dbg(mmc_dev(mmc), "%s: No tuning point found\n",
			mmc_hostname(mmc));
		rc = -EIO;
	}

	if (!rc)
		msm_host->tuning_done = true;
	return rc;
}

/*
 * sdhci_msm_hs400 - Calibrate the DLL for HS400 bus speed mode operation.
 * This needs to be done for both tuning and enhanced_strobe mode.
 * DLL operation is only needed for clock > 100MHz. For clock <= 100MHz
 * a fixed feedback clock is used.
 */
static void sdhci_msm_hs400(struct sdhci_host *host, struct mmc_ios *ios)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int ret;

	if (host->clock > CORE_FREQ_100MHZ &&
	    (msm_host->tuning_done || ios->enhanced_strobe) &&
	    !msm_host->calibration_done) {
		ret = sdhci_msm_hs400_dll_calibration(host);
		if (!ret)
			msm_host->calibration_done = true;
		else
			pr_err("%s: Failed to calibrate DLL for hs400 mode (%d)\n",
			       mmc_hostname(host->mmc), ret);
	}
}

static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
					unsigned int uhs)
{
	struct mmc_host *mmc = host->mmc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u16 ctrl_2;
	u32 config;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	switch (uhs) {
	case MMC_TIMING_UHS_SDR12:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
		break;
	case MMC_TIMING_UHS_SDR25:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
		break;
	case MMC_TIMING_UHS_SDR50:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
		break;
	case MMC_TIMING_MMC_HS400:
	case MMC_TIMING_MMC_HS200:
	case MMC_TIMING_UHS_SDR104:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
		break;
	}

	/*
	 * When clock frequency is less than 100MHz, the feedback clock must be
	 * provided and DLL must not be used so that tuning can be skipped. To
	 * provide feedback clock, the mode selection can be any value less
	 * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
	 */
	if (host->clock <= CORE_FREQ_100MHZ) {
		if (uhs == MMC_TIMING_MMC_HS400 ||
		    uhs == MMC_TIMING_MMC_HS200 ||
		    uhs == MMC_TIMING_UHS_SDR104)
			ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
		/*
		 * The DLL is not required for clock <= 100MHz, so make
		 * sure it is disabled when not required.
		 */
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config);
		config |= CORE_DLL_RST;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config);

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config);
		config |= CORE_DLL_PDN;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config);

		/*
		 * The DLL needs to be restored and CDCLP533 recalibrated
		 * when the clock frequency is set back to 400MHz.
		 */
		msm_host->calibration_done = false;
	}

	dev_dbg(mmc_dev(mmc), "%s: clock=%u uhs=%u ctrl_2=0x%x\n",
		mmc_hostname(host->mmc), host->clock, uhs, ctrl_2);
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);

	if (mmc->ios.timing == MMC_TIMING_MMC_HS400)
		sdhci_msm_hs400(host, &mmc->ios);
}

static inline void sdhci_msm_init_pwr_irq_wait(struct sdhci_msm_host *msm_host)
{
	init_waitqueue_head(&msm_host->pwr_irq_wait);
}

static inline void sdhci_msm_complete_pwr_irq_wait(
		struct sdhci_msm_host *msm_host)
{
	wake_up(&msm_host->pwr_irq_wait);
}

/*
 * sdhci_msm_check_power_status API should be called when a register write
 * can toggle the sdhci IO bus ON/OFF or change the IO lines HIGH/LOW. The
 * state the register write will change the IO lines to should be passed
 * as the argument req_type. This API checks whether the IO line's state
 * is already the expected state and waits for the power irq only if a
 * power irq is expected to be triggered based on the current and expected
 * IO line states.
 */
static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	bool done = false;
	u32 val = SWITCHABLE_SIGNALING_VOLTAGE;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
		 mmc_hostname(host->mmc), __func__, req_type,
		 msm_host->curr_pwr_state, msm_host->curr_io_level);

	/*
	 * The power interrupt will not be generated for signal voltage
	 * switches if SWITCHABLE_SIGNALING_VOLTAGE in MCI_GENERICS is not set.
	 * Since sdhci-msm-v5, this bit has been removed and SW must consider
	 * it as always set.
	 */
	if (!msm_host->mci_removed)
		val = msm_host_readl(msm_host, host,
				msm_offset->core_generics);
	if ((req_type & REQ_IO_HIGH || req_type & REQ_IO_LOW) &&
	    !(val & SWITCHABLE_SIGNALING_VOLTAGE)) {
		return;
	}

	/*
	 * The IRQ for request type IO High/LOW will be generated when -
	 * there is a state change in 1.8V enable bit (bit 3) of
	 * SDHCI_HOST_CONTROL2 register. The reset state of that bit is 0
	 * which indicates 3.3V IO voltage. So, when MMC core layer tries
	 * to set it to 3.3V before card detection happens, the
	 * IRQ doesn't get triggered as there is no state change in this bit.
	 * The driver already handles this case by changing the IO voltage
	 * level to high as part of controller power up sequence. Hence, check
	 * for host->pwr to handle a case where IO voltage high request is
	 * issued even before controller power up.
	 */
	if ((req_type & REQ_IO_HIGH) && !host->pwr) {
		pr_debug("%s: do not wait for power IRQ that never comes, req_type: %d\n",
			 mmc_hostname(host->mmc), req_type);
		return;
	}
	if ((req_type & msm_host->curr_pwr_state) ||
	    (req_type & msm_host->curr_io_level))
		done = true;
	/*
	 * This is needed here to handle cases where register writes will
	 * not change the current bus state or io level of the controller.
	 * In this case, no power irq will be triggered and we should
	 * not wait.
	 */
	if (!done) {
		if (!wait_event_timeout(msm_host->pwr_irq_wait,
				msm_host->pwr_irq_flag,
				msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS)))
			dev_warn(&msm_host->pdev->dev,
				 "%s: pwr_irq for req: (%d) timed out\n",
				 mmc_hostname(host->mmc), req_type);
	}
	pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
		 __func__, req_type);
}

static void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
	       mmc_hostname(host->mmc),
	       msm_host_readl(msm_host, host, msm_offset->core_pwrctl_status),
	       msm_host_readl(msm_host, host, msm_offset->core_pwrctl_mask),
	       msm_host_readl(msm_host, host, msm_offset->core_pwrctl_ctl));
}

static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 irq_status, irq_ack = 0;
	int retry = 10;
	u32 pwr_state = 0, io_level = 0;
	u32 config;
	const struct sdhci_msm_offset *msm_offset = msm_host->offset;

	irq_status = msm_host_readl(msm_host, host,
			msm_offset->core_pwrctl_status);
	irq_status &= INT_MASK;

	msm_host_writel(msm_host, irq_status, host,
			msm_offset->core_pwrctl_clear);

	/*
	 * There is a rare HW scenario where the first clear pulse could be
	 * lost when the actual reset and the clear/read of the status
	 * register happen at the same time. Hence, retry at least 10 times
	 * to make sure the status register is cleared. Otherwise, this will
	 * result in a spurious power IRQ, resulting in system instability.
	 */
	while (irq_status & msm_host_readl(msm_host, host,
				msm_offset->core_pwrctl_status)) {
		if (retry == 0) {
			pr_err("%s: Timedout clearing (0x%x) pwrctl status register\n",
			       mmc_hostname(host->mmc), irq_status);
			sdhci_msm_dump_pwr_ctrl_regs(host);
			WARN_ON(1);
			break;
		}
		msm_host_writel(msm_host, irq_status, host,
				msm_offset->core_pwrctl_clear);
		retry--;
		udelay(10);
	}

	/* Handle BUS ON/OFF */
	if (irq_status & CORE_PWRCTL_BUS_ON) {
		pwr_state = REQ_BUS_ON;
		io_level = REQ_IO_HIGH;
		irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
	}
	if (irq_status & CORE_PWRCTL_BUS_OFF) {
		pwr_state = REQ_BUS_OFF;
		io_level = REQ_IO_LOW;
		irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
	}
	/* Handle IO LOW/HIGH */
	if (irq_status & CORE_PWRCTL_IO_LOW) {
		io_level = REQ_IO_LOW;
		irq_ack |= CORE_PWRCTL_IO_SUCCESS;
	}
	if (irq_status & CORE_PWRCTL_IO_HIGH) {
		io_level = REQ_IO_HIGH;
		irq_ack |= CORE_PWRCTL_IO_SUCCESS;
	}

	/*
	 * The driver has to acknowledge the interrupt, switch voltages and
	 * report back to this register whether it succeeded or not. The
	 * voltage switches are handled by the sdhci core, so just report
	 * success.
	 */
	msm_host_writel(msm_host, irq_ack, host,
			msm_offset->core_pwrctl_ctl);

	/*
	 * If we don't have info regarding the voltage levels supported by
	 * regulators, don't change the IO PAD PWR SWITCH.
	 */
	if (msm_host->caps_0 & CORE_VOLT_SUPPORT) {
		u32 new_config;
		/*
		 * We should unset the IO PAD PWR switch only if the register
		 * write can set IO lines high and the regulator also
		 * switches to 3 V. Else, we should keep the IO PAD PWR
		 * switch set.
		 * This is applicable to certain targets where the eMMC vccq
		 * supply is only 1.8V. In such targets, even during
		 * REQ_IO_HIGH, the IO PAD PWR switch must be kept set to
		 * reflect the actual regulator voltage. This way, during
		 * initialization of controllers with only 1.8V, we will set
		 * the IO PAD bit without waiting for a REQ_IO_LOW.
		 */
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec);
		new_config = config;

		if ((io_level & REQ_IO_HIGH) &&
		    (msm_host->caps_0 & CORE_3_0V_SUPPORT))
			new_config &= ~CORE_IO_PAD_PWR_SWITCH;
		else if ((io_level & REQ_IO_LOW) ||
			 (msm_host->caps_0 & CORE_1_8V_SUPPORT))
			new_config |= CORE_IO_PAD_PWR_SWITCH;

		if (config ^ new_config)
			writel_relaxed(new_config, host->ioaddr +
				       msm_offset->core_vendor_spec);
	}

	if (pwr_state)
		msm_host->curr_pwr_state = pwr_state;
	if (io_level)
		msm_host->curr_io_level = io_level;

	pr_debug("%s: %s: Handled IRQ(%d), irq_status=0x%x, ack=0x%x\n",
		 mmc_hostname(msm_host->mmc), __func__, irq, irq_status,
		 irq_ack);
}

static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
{
	struct sdhci_host *host = (struct sdhci_host *)data;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	sdhci_msm_handle_pwr_irq(host, irq);
	msm_host->pwr_irq_flag = 1;
	sdhci_msm_complete_pwr_irq_wait(msm_host);

	return IRQ_HANDLED;
}

static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct clk *core_clk = msm_host->bulk_clks[0].clk;

	return clk_round_rate(core_clk, ULONG_MAX);
}

static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
{
	return SDHCI_MSM_MIN_CLOCK;
}

/**
 * __sdhci_msm_set_clock - sdhci_msm clock control.
 *
 * Description:
 * The MSM controller does not use an internal divider and
 * instead directly controls the GCC clock as per
 * HW recommendation.
 **/
static void __sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;
	/*
	 * Keep actual_clock as zero:
	 * - since no divider is used, there is no need for actual_clock.
	 * - the MSM controller uses SDCLK for data timeout calculation. If
	 *   actual_clock is zero, host->clock is taken for calculation.
	 */
	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	/*
	 * The MSM controller does not use a clock divider.
	 * Thus read SDHCI_CLOCK_CONTROL and only enable the
	 * clock with no divider value programmed.
	 */
	clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
	sdhci_enable_clk(host, clk);
}

/* sdhci_msm_set_clock - Called with (host->lock) spinlock held. */
static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	if (!clock) {
		msm_host->clk_rate = clock;
		goto out;
	}

	sdhci_msm_hc_select_mode(host);

	msm_set_clock_rate_for_bus_mode(host, clock);
out:
	__sdhci_msm_set_clock(host, clock);
}

/*****************************************************************************\
 *                                                                           *
 * MSM Command Queue Engine (CQE)                                            *
 *                                                                           *
\*****************************************************************************/

static u32 sdhci_msm_cqe_irq(struct sdhci_host *host, u32 intmask)
{
	int cmd_error = 0;
	int data_error = 0;

	if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
		return intmask;

	cqhci_irq(host->mmc, intmask, cmd_error, data_error);
	return 0;
}

static void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	u32 ctrl;

	/*
	 * When CQE is halted, the legacy SDHCI path operates only
	 * on 16-byte descriptors in 64bit mode.
	 */
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		host->desc_sz = 16;

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * During CQE command transfers, the command complete bit gets
	 * latched. So s/w should clear the command complete interrupt status
	 * when CQE is either halted or disabled. Otherwise an unexpected
	 * SDHCI legacy interrupt gets triggered when CQE is halted/disabled.
	 */
	ctrl = sdhci_readl(host, SDHCI_INT_ENABLE);
	ctrl |= SDHCI_INT_RESPONSE;
	sdhci_writel(host, ctrl, SDHCI_INT_ENABLE);
	sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS);

	spin_unlock_irqrestore(&host->lock, flags);

	sdhci_cqe_disable(mmc, recovery);
}

static const struct cqhci_host_ops sdhci_msm_cqhci_ops = {
	.enable = sdhci_cqe_enable,
	.disable = sdhci_msm_cqe_disable,
};

static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
				  struct platform_device *pdev)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct cqhci_host *cq_host;
	bool dma64;
	u32 cqcfg;
	int ret;

	/*
	 * When CQE is halted, the SDHC operates only on 16-byte ADMA
	 * descriptors, so ensure the ADMA table is allocated for 16-byte
	 * descriptors.
	 */
	if (host->caps & SDHCI_CAN_64BIT)
		host->alloc_desc_sz = 16;

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	cq_host = cqhci_pltfm_init(pdev);
	if (IS_ERR(cq_host)) {
		ret = PTR_ERR(cq_host);
		dev_err(&pdev->dev, "cqhci-pltfm init: failed: %d\n", ret);
		goto cleanup;
	}

	msm_host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
	cq_host->ops = &sdhci_msm_cqhci_ops;

	dma64 = host->flags & SDHCI_USE_64_BIT_DMA;

	ret = cqhci_init(cq_host, host->mmc, dma64);
	if (ret) {
		dev_err(&pdev->dev, "%s: CQE init: failed (%d)\n",
			mmc_hostname(host->mmc), ret);
		goto cleanup;
	}

	/* Disable cqe reset due to cqe enable signal */
	cqcfg = cqhci_readl(cq_host, CQHCI_VENDOR_CFG1);
	cqcfg |= CQHCI_VENDOR_DIS_RST_ON_CQ_EN;
	cqhci_writel(cq_host, cqcfg, CQHCI_VENDOR_CFG1);

	/*
	 * The SDHC expects 12-byte ADMA descriptors until CQE is enabled.
	 * So limit desc_sz to 12 so that the data commands that are sent
	 * during card initialization (before CQE gets enabled) get
	 * executed without any issues.
	 */
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		host->desc_sz = 12;

	ret = __sdhci_add_host(host);
	if (ret)
		goto cleanup;

	dev_info(&pdev->dev, "%s: CQE init: success\n",
		 mmc_hostname(host->mmc));
	return ret;

cleanup:
	sdhci_cleanup_host(host);
	return ret;
}

/*
 * Platform-specific register write functions. This is so that, if any
 * register write needs to be followed up by platform-specific actions,
 * they can be added here. These functions can go to sleep when writes
 * to certain registers are done.
 * These functions rely on sdhci_set_ios not using a spinlock.
 */
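/*
 * Classify a register write about to be issued: return the REQ_* power irq
 * event (bus on/off, IO voltage high/low) the write is expected to trigger,
 * or 0 if none, so callers know whether to wait for the power irq.
 */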
static int __sdhci_msm_check_write(struct sdhci_host *host, u16 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 req_type = 0;

	switch (reg) {
	case SDHCI_HOST_CONTROL2:
		req_type = (val & SDHCI_CTRL_VDD_180) ? REQ_IO_LOW :
			REQ_IO_HIGH;
		break;
	case SDHCI_SOFTWARE_RESET:
		if (host->pwr && (val & SDHCI_RESET_ALL))
			req_type = REQ_BUS_OFF;
		break;
	case SDHCI_POWER_CONTROL:
		req_type = !val ? REQ_BUS_OFF : REQ_BUS_ON;
		break;
	case SDHCI_TRANSFER_MODE:
		msm_host->transfer_mode = val;
		break;
	case SDHCI_COMMAND:
		if (!msm_host->use_cdr)
			break;
		if ((msm_host->transfer_mode & SDHCI_TRNS_READ) &&
		    SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK_HS200 &&
		    SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK)
			sdhci_msm_set_cdr(host, true);
		else
			sdhci_msm_set_cdr(host, false);
		break;
	}

	if (req_type) {
		msm_host->pwr_irq_flag = 0;
		/*
		 * Since this register write may trigger a power irq, ensure
		 * all previous register writes are complete by this point.
		 */
		mb();
	}
	return req_type;
}

/* This function may sleep */
static void sdhci_msm_writew(struct sdhci_host *host, u16 val, int reg)
{
	u32 req_type = 0;

	req_type = __sdhci_msm_check_write(host, val, reg);
	writew_relaxed(val, host->ioaddr + reg);

	if (req_type)
		sdhci_msm_check_power_status(host, req_type);
}

/* This function may sleep */
static void sdhci_msm_writeb(struct sdhci_host *host, u8 val, int reg)
{
	u32 req_type = 0;

	req_type = __sdhci_msm_check_write(host, val, reg);

	writeb_relaxed(val, host->ioaddr + reg);

	if (req_type)
		sdhci_msm_check_power_status(host, req_type);
}

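/*
 * Query vqmmc for 1.8V and 3.0V support and record the result in caps_0
 * for later IO voltage switching; also enable the IO pad power switch and
 * pre-set it to match the current IO level.
 */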
static void sdhci_msm_set_regulator_caps(struct sdhci_msm_host *msm_host)
{
	struct mmc_host *mmc = msm_host->mmc;
	struct regulator *supply = mmc->supply.vqmmc;
	u32 caps = 0, config;
	struct sdhci_host *host = mmc_priv(mmc);
	const struct sdhci_msm_offset *msm_offset = msm_host->offset;

	if (!IS_ERR(mmc->supply.vqmmc)) {
		if (regulator_is_supported_voltage(supply, 1700000, 1950000))
			caps |= CORE_1_8V_SUPPORT;
		if (regulator_is_supported_voltage(supply, 2700000, 3600000))
			caps |= CORE_3_0V_SUPPORT;

		if (!caps)
			pr_warn("%s: 1.8/3V not supported for vqmmc\n",
				mmc_hostname(mmc));
	}

	if (caps) {
		/*
		 * Set the PAD_PWR_SWITCH_EN bit so that the PAD_PWR_SWITCH
		 * bit can be used as required later on.
		 */
		u32 io_level = msm_host->curr_io_level;

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec);
		config |= CORE_IO_PAD_PWR_SWITCH_EN;

		if ((io_level & REQ_IO_HIGH) && (caps & CORE_3_0V_SUPPORT))
			config &= ~CORE_IO_PAD_PWR_SWITCH;
		else if ((io_level & REQ_IO_LOW) || (caps & CORE_1_8V_SUPPORT))
			config |= CORE_IO_PAD_PWR_SWITCH;

		writel_relaxed(config,
				host->ioaddr + msm_offset->core_vendor_spec);
	}
	msm_host->caps_0 |= caps;
	pr_debug("%s: supported caps: 0x%08x\n", mmc_hostname(mmc), caps);
}

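/* Deactivate the CQE before a full reset so the cqhci state stays consistent. */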
static void sdhci_msm_reset(struct sdhci_host *host, u8 mask)
{
	if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL))
		cqhci_deactivate(host->mmc);
	sdhci_reset(host, mask);
}

#define DRIVER_NAME "sdhci_msm"
#define SDHCI_MSM_DUMP(f, x...) \
	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_msm_offset *msm_offset = msm_host->offset;

	SDHCI_MSM_DUMP("----------- VENDOR REGISTER DUMP -----------\n");

	SDHCI_MSM_DUMP(
		"DLL sts: 0x%08x | DLL cfg: 0x%08x | DLL cfg2: 0x%08x\n",
		readl_relaxed(host->ioaddr + msm_offset->core_dll_status),
		readl_relaxed(host->ioaddr + msm_offset->core_dll_config),
		readl_relaxed(host->ioaddr + msm_offset->core_dll_config_2));
	SDHCI_MSM_DUMP(
		"DLL cfg3: 0x%08x | DLL usr ctl: 0x%08x | DDR cfg: 0x%08x\n",
		readl_relaxed(host->ioaddr + msm_offset->core_dll_config_3),
		readl_relaxed(host->ioaddr + msm_offset->core_dll_usr_ctl),
		readl_relaxed(host->ioaddr + msm_offset->core_ddr_config));
	SDHCI_MSM_DUMP(
		"Vndr func: 0x%08x | Vndr func2: 0x%08x | Vndr func3: 0x%08x\n",
		readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec),
		readl_relaxed(host->ioaddr +
			msm_offset->core_vendor_spec_func2),
		readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec3));
}

static const struct sdhci_msm_variant_ops mci_var_ops = {
	.msm_readl_relaxed = sdhci_msm_mci_variant_readl_relaxed,
	.msm_writel_relaxed = sdhci_msm_mci_variant_writel_relaxed,
};

static const struct sdhci_msm_variant_ops v5_var_ops = {
	.msm_readl_relaxed = sdhci_msm_v5_variant_readl_relaxed,
	.msm_writel_relaxed = sdhci_msm_v5_variant_writel_relaxed,
};

static const struct sdhci_msm_variant_info sdhci_msm_mci_var = {
	.var_ops = &mci_var_ops,
	.offset = &sdhci_msm_mci_offset,
};

static const struct sdhci_msm_variant_info sdhci_msm_v5_var = {
	.mci_removed = true,
	.var_ops = &v5_var_ops,
	.offset = &sdhci_msm_v5_offset,
};

static const struct sdhci_msm_variant_info sdm845_sdhci_var = {
	.mci_removed = true,
	.restore_dll_config = true,
	.var_ops = &v5_var_ops,
	.offset = &sdhci_msm_v5_offset,
};

static const struct sdhci_msm_variant_info sm8250_sdhci_var = {
	.mci_removed = true,
	.uses_tassadar_dll = true,
	.var_ops = &v5_var_ops,
	.offset = &sdhci_msm_v5_offset,
};

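/*
 * Each compatible string selects the register layout and the variant
 * quirks (MCI removed, DLL restore, Tassadar DLL) that probe applies.
 */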
static const struct of_device_id sdhci_msm_dt_match[] = {
	{.compatible = "qcom,sdhci-msm-v4", .data = &sdhci_msm_mci_var},
	{.compatible = "qcom,sdhci-msm-v5", .data = &sdhci_msm_v5_var},
	{.compatible = "qcom,sdm845-sdhci", .data = &sdm845_sdhci_var},
	{.compatible = "qcom,sm8250-sdhci", .data = &sm8250_sdhci_var},
	{},
};

MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);

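/* SDHCI callbacks with MSM-specific clock, reset and register-write handling. */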
static const struct sdhci_ops sdhci_msm_ops = {
	.reset = sdhci_msm_reset,
	.set_clock = sdhci_msm_set_clock,
	.get_min_clock = sdhci_msm_get_min_clock,
	.get_max_clock = sdhci_msm_get_max_clock,
	.set_bus_width = sdhci_set_bus_width,
	.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
	.write_w = sdhci_msm_writew,
	.write_b = sdhci_msm_writeb,
	.irq = sdhci_msm_cqe_irq,
	.dump_vendor_regs = sdhci_msm_dump_vendor_regs,
};

static const struct sdhci_pltfm_data sdhci_msm_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
		  SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,

	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops = &sdhci_msm_ops,
};

static inline void sdhci_msm_get_of_property(struct platform_device *pdev,
		struct sdhci_host *host)
{
	struct device_node *node = pdev->dev.of_node;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	if (of_property_read_u32(node, "qcom,ddr-config",
				 &msm_host->ddr_config))
		msm_host->ddr_config = DDR_CONFIG_POR_VAL;

	of_property_read_u32(node, "qcom,dll-config", &msm_host->dll_config);
}

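/*
 * Probe: parse DT, bring up the bus/iface/core clocks and the optional OPP
 * table, reset the vendor registers, wire up the power irq, and register
 * the host (with CQE support when the DT advertises "supports-cqe").
 */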
static int sdhci_msm_probe(struct platform_device *pdev)
{
	struct sdhci_host *host;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_msm_host *msm_host;
	struct clk *clk;
	int ret;
	u16 host_version, core_minor;
	u32 core_version, config;
	u8 core_major;
	const struct sdhci_msm_offset *msm_offset;
	const struct sdhci_msm_variant_info *var_info;
	struct device_node *node = pdev->dev.of_node;

	host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host));
	if (IS_ERR(host))
		return PTR_ERR(host);

	host->sdma_boundary = 0;
	pltfm_host = sdhci_priv(host);
	msm_host = sdhci_pltfm_priv(pltfm_host);
	msm_host->mmc = host->mmc;
	msm_host->pdev = pdev;

	ret = mmc_of_parse(host->mmc);
	if (ret)
		goto pltfm_free;

	/*
	 * Based on the compatible string, load the required MSM host info
	 * from the data associated with the matched variant.
	 */
	var_info = of_device_get_match_data(&pdev->dev);

	msm_host->mci_removed = var_info->mci_removed;
	msm_host->restore_dll_config = var_info->restore_dll_config;
	msm_host->var_ops = var_info->var_ops;
	msm_host->offset = var_info->offset;
	msm_host->uses_tassadar_dll = var_info->uses_tassadar_dll;

	msm_offset = msm_host->offset;

	sdhci_get_of_property(pdev);
	sdhci_msm_get_of_property(pdev, host);

	msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;

	/* Set up the SDCC bus voter clock. */
	msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus");
	if (!IS_ERR(msm_host->bus_clk)) {
		/* Vote for the maximum clock rate for maximum performance */
		ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
		if (ret)
			goto pltfm_free;
		ret = clk_prepare_enable(msm_host->bus_clk);
		if (ret)
			goto pltfm_free;
	}

	/* Set up the main peripheral bus clock */
	clk = devm_clk_get(&pdev->dev, "iface");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret);
		goto bus_clk_disable;
	}
	msm_host->bulk_clks[1].clk = clk;

	/* Set up the SDC MMC clock */
	clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret);
		goto bus_clk_disable;
	}
	msm_host->bulk_clks[0].clk = clk;

	msm_host->opp_table = dev_pm_opp_set_clkname(&pdev->dev, "core");
	if (IS_ERR(msm_host->opp_table)) {
		ret = PTR_ERR(msm_host->opp_table);
		goto bus_clk_disable;
	}

	/* The OPP table is optional */
	ret = dev_pm_opp_of_add_table(&pdev->dev);
	if (!ret) {
		msm_host->has_opp_table = true;
	} else if (ret != -ENODEV) {
		dev_err(&pdev->dev, "Invalid OPP table in device tree\n");
		goto opp_cleanup;
	}

	/* Vote for the maximum clock rate for maximum performance */
	ret = dev_pm_opp_set_rate(&pdev->dev, INT_MAX);
	if (ret)
		dev_warn(&pdev->dev, "core clock boost failed\n");

	clk = devm_clk_get(&pdev->dev, "cal");
	if (IS_ERR(clk))
		clk = NULL;
	msm_host->bulk_clks[2].clk = clk;

	clk = devm_clk_get(&pdev->dev, "sleep");
	if (IS_ERR(clk))
		clk = NULL;
	msm_host->bulk_clks[3].clk = clk;

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
				      msm_host->bulk_clks);
	if (ret)
		goto opp_cleanup;

	/*
	 * The xo clock is needed for the FLL feature of the CM DLL.
	 * If the xo clock is not specified in the DT, warn and proceed.
	 */
	msm_host->xo_clk = devm_clk_get(&pdev->dev, "xo");
	if (IS_ERR(msm_host->xo_clk)) {
		ret = PTR_ERR(msm_host->xo_clk);
		dev_warn(&pdev->dev, "TCXO clk not present (%d)\n", ret);
	}

	if (!msm_host->mci_removed) {
		msm_host->core_mem = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(msm_host->core_mem)) {
			ret = PTR_ERR(msm_host->core_mem);
			goto clk_disable;
		}
	}

	/* Reset the vendor spec register to its power-on reset state */
	writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
		       host->ioaddr + msm_offset->core_vendor_spec);

	if (!msm_host->mci_removed) {
		/* Set the HC_MODE_EN bit in the HC_MODE register */
		msm_host_writel(msm_host, HC_MODE_EN, host,
				msm_offset->core_hc_mode);
		config = msm_host_readl(msm_host, host,
					msm_offset->core_hc_mode);
		config |= FF_CLK_SW_RST_DIS;
		msm_host_writel(msm_host, config, host,
				msm_offset->core_hc_mode);
	}

	host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
	dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
		host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
			       SDHCI_VENDOR_VER_SHIFT));

	core_version = msm_host_readl(msm_host, host,
				      msm_offset->core_mci_version);
	core_major = (core_version & CORE_VERSION_MAJOR_MASK) >>
		     CORE_VERSION_MAJOR_SHIFT;
	core_minor = core_version & CORE_VERSION_MINOR_MASK;
	dev_dbg(&pdev->dev, "MCI Version: 0x%08x, major: 0x%04x, minor: 0x%02x\n",
		core_version, core_major, core_minor);

	if (core_major == 1 && core_minor >= 0x42)
		msm_host->use_14lpp_dll_reset = true;

	/*
	 * SDCC 5 controllers with major version 1, minor version 0x34 or
	 * later support HS400 using the CM DLL; earlier versions use the
	 * CDC LP 533 DLL instead, so flag them for CDCLP533 calibration.
	 */
	if (core_major == 1 && core_minor < 0x34)
		msm_host->use_cdclp533 = true;

	/*
	 * Support for some capabilities is not advertised by newer
	 * controller versions and must be explicitly enabled.
	 */
	if (core_major >= 1 && core_minor != 0x11 && core_minor != 0x12) {
		config = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
		config |= SDHCI_CAN_VDD_300 | SDHCI_CAN_DO_8BIT;
		writel_relaxed(config, host->ioaddr +
			       msm_offset->core_vendor_spec_capabilities0);
	}

	if (core_major == 1 && core_minor >= 0x49)
		msm_host->updated_ddr_cfg = true;

	/*
	 * The power-on reset state may trigger a power irq if the previous
	 * status of PWRCTL was either BUS_ON or IO_HIGH_V. So before the pwr
	 * irq interrupt is enabled in the GIC, any pending power irq
	 * interrupt should be acknowledged; otherwise the power irq handler
	 * would fire prematurely.
	 */
	sdhci_msm_handle_pwr_irq(host, 0);

	/*
	 * Ensure that the above writes are propagated before the interrupt
	 * is enabled in the GIC.
	 */
	mb();

	/* Set up the IRQ for handling power/voltage tasks with the PMIC */
	msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
	if (msm_host->pwr_irq < 0) {
		ret = msm_host->pwr_irq;
		goto clk_disable;
	}

	sdhci_msm_init_pwr_irq_wait(msm_host);
	/* Enable pwr irq interrupts */
	msm_host_writel(msm_host, INT_MASK, host,
			msm_offset->core_pwrctl_mask);

	ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
					sdhci_msm_pwr_irq, IRQF_ONESHOT,
					dev_name(&pdev->dev), host);
	if (ret) {
		dev_err(&pdev->dev, "Request IRQ failed (%d)\n", ret);
		goto clk_disable;
	}

	msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;

	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev,
					 MSM_MMC_AUTOSUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(&pdev->dev);

	host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning;
	if (of_property_read_bool(node, "supports-cqe"))
		ret = sdhci_msm_cqe_add_host(host, pdev);
	else
		ret = sdhci_add_host(host);
	if (ret)
		goto pm_runtime_disable;
	sdhci_msm_set_regulator_caps(msm_host);

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

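	/* Error unwinding: each label falls through to the next, undoing setup in reverse order. */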
pm_runtime_disable:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
clk_disable:
	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
				   msm_host->bulk_clks);
opp_cleanup:
	if (msm_host->has_opp_table)
		dev_pm_opp_of_remove_table(&pdev->dev);
	dev_pm_opp_put_clkname(msm_host->opp_table);
bus_clk_disable:
	if (!IS_ERR(msm_host->bus_clk))
		clk_disable_unprepare(msm_host->bus_clk);
pltfm_free:
	sdhci_pltfm_free(pdev);
	return ret;
}

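/*
 * Remove: detach the host (an all-ones interrupt status is treated as a
 * dead controller), drop the OPP table and runtime PM references, and
 * disable the clocks in reverse order of probe.
 */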
static int sdhci_msm_remove(struct platform_device *pdev)
{
	struct sdhci_host *host = platform_get_drvdata(pdev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
		    0xffffffff);

	sdhci_remove_host(host, dead);

	if (msm_host->has_opp_table)
		dev_pm_opp_of_remove_table(&pdev->dev);
	dev_pm_opp_put_clkname(msm_host->opp_table);
	pm_runtime_get_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);

	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
				   msm_host->bulk_clks);
	if (!IS_ERR(msm_host->bus_clk))
		clk_disable_unprepare(msm_host->bus_clk);
	sdhci_pltfm_free(pdev);
	return 0;
}

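/* Runtime PM: drop the OPP performance vote and gate the core clocks while idle. */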
static __maybe_unused int sdhci_msm_runtime_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	/* Drop the performance vote */
	dev_pm_opp_set_rate(dev, 0);
	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
				   msm_host->bulk_clks);

	return 0;
}

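/*
 * Runtime resume: re-enable the clocks, restore the SDR DLL configuration
 * on variants that lose it across clock gating, and restore the OPP vote.
 */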
static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int ret;

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
				      msm_host->bulk_clks);
	if (ret)
		return ret;
	/*
	 * Whenever the core clock is gated dynamically, the SDR DLL settings
	 * need to be restored when the clock is ungated.
	 */
	if (msm_host->restore_dll_config && msm_host->clk_rate)
		ret = sdhci_msm_restore_sdr_dll_config(host);

	dev_pm_opp_set_rate(dev, msm_host->clk_rate);

	return ret;
}

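/* System sleep reuses the runtime PM callbacks via force suspend/resume. */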
static const struct dev_pm_ops sdhci_msm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend,
			   sdhci_msm_runtime_resume,
			   NULL)
};

static struct platform_driver sdhci_msm_driver = {
	.probe = sdhci_msm_probe,
	.remove = sdhci_msm_remove,
	.driver = {
		.name = "sdhci_msm",
		.of_match_table = sdhci_msm_dt_match,
		.pm = &sdhci_msm_pm_ops,
	},
};

module_platform_driver(sdhci_msm_driver);

MODULE_DESCRIPTION("Qualcomm Secure Digital Host Controller Interface driver");
MODULE_LICENSE("GPL v2");