// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
 * Author: Ludovic.barre@st.com for STMicroelectronics.
 */
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/of_address.h>
#include <linux/reset.h>
#include <linux/scatterlist.h>
#include "mmci.h"

#define SDMMC_LLI_BUF_LEN	PAGE_SIZE
#define SDMMC_IDMA_BURST	BIT(MMCI_STM32_IDMABNDT_SHIFT)
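
/*
 * Illustrative sizing, not from the original source: assuming a 4 KiB
 * PAGE_SIZE, the LLI buffer holds 4096 / sizeof(struct sdmmc_lli_desc)
 * = 4096 / 12 = 341 descriptors, which is what bounds max_segs in
 * sdmmc_idma_setup() below.
 */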

#define DLYB_CR			0x0
#define DLYB_CR_DEN		BIT(0)
#define DLYB_CR_SEN		BIT(1)

#define DLYB_CFGR		0x4
#define DLYB_CFGR_SEL_MASK	GENMASK(3, 0)
#define DLYB_CFGR_UNIT_MASK	GENMASK(14, 8)
#define DLYB_CFGR_LNG_MASK	GENMASK(27, 16)
#define DLYB_CFGR_LNGF		BIT(31)

#define DLYB_NB_DELAY		11
#define DLYB_CFGR_SEL_MAX	(DLYB_NB_DELAY + 1)
#define DLYB_CFGR_UNIT_MAX	127
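
/*
 * Reading aid (derived from the tuning code below): the delay line has
 * DLYB_NB_DELAY (11) tap outputs selected via DLYB_CFGR_SEL and a unit
 * delay programmed via DLYB_CFGR_UNIT (0..127). Tuning first sweeps the
 * unit to find a line length close to one clock period, then sweeps the
 * output phase across the usable taps.
 */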

#define DLYB_LNG_TIMEOUT_US	1000
#define SDMMC_VSWEND_TIMEOUT_US	10000

struct sdmmc_lli_desc {
	u32 idmalar;
	u32 idmabase;
	u32 idmasize;
};
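
/*
 * A short reading aid (derived from sdmmc_idma_start() below): idmalar
 * holds the byte offset of the next descriptor in the LLI buffer plus
 * the MMCI_STM32_ULA/ULS/ABR control bits, while idmabase/idmasize
 * describe one DMA segment.
 */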

struct sdmmc_idma {
	dma_addr_t sg_dma;
	void *sg_cpu;
};

struct sdmmc_dlyb {
	void __iomem *base;
	u32 unit;
	u32 max;
};

static int sdmmc_idma_validate_data(struct mmci_host *host,
				    struct mmc_data *data)
{
	struct scatterlist *sg;
	int i;

	/*
	 * idma has constraints on idmabase & idmasize for each element
	 * except the last one, which has no constraint on idmasize
	 */
	for_each_sg(data->sg, sg, data->sg_len - 1, i) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32)) ||
		    !IS_ALIGNED(sg->length, SDMMC_IDMA_BURST)) {
			dev_err(mmc_dev(host->mmc),
				"unaligned scatterlist: ofst:%x length:%d\n",
				sg->offset, sg->length);
			return -EINVAL;
		}
	}

	/* on loop exit, sg points to the last element */
	if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
		dev_err(mmc_dev(host->mmc),
			"unaligned last scatterlist: ofst:%x length:%d\n",
			sg->offset, sg->length);
		return -EINVAL;
	}

	return 0;
}
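
/*
 * Worked example, assuming MMCI_STM32_IDMABNDT_SHIFT is 5 as in mmci.h,
 * so SDMMC_IDMA_BURST is 32 bytes: a list with segment lengths
 * 64, 96, 70 is accepted (only the last length may be unaligned),
 * whereas 64, 70, 96 is rejected at the second element.
 */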

static int _sdmmc_idma_prep_data(struct mmci_host *host,
				 struct mmc_data *data)
{
	int n_elem;

	n_elem = dma_map_sg(mmc_dev(host->mmc),
			    data->sg,
			    data->sg_len,
			    mmc_get_dma_dir(data));

	if (!n_elem) {
		dev_err(mmc_dev(host->mmc), "dma_map_sg failed\n");
		return -EINVAL;
	}

	return 0;
}

static int sdmmc_idma_prep_data(struct mmci_host *host,
				struct mmc_data *data, bool next)
{
	/* Check if job is already prepared. */
	if (!next && data->host_cookie == host->next_cookie)
		return 0;

	return _sdmmc_idma_prep_data(host, data);
}
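
/*
 * Flow note: the host_cookie check above supports the mmc core's
 * pre_req()/post_req() hooks. When a request is prepared ahead of time
 * (next == true), the core stamps data->host_cookie, so the later call
 * on the dma_start path can skip a second dma_map_sg().
 */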

static void sdmmc_idma_unprep_data(struct mmci_host *host,
				   struct mmc_data *data, int err)
{
	dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
		     mmc_get_dma_dir(data));
}

static int sdmmc_idma_setup(struct mmci_host *host)
{
	struct sdmmc_idma *idma;

	idma = devm_kzalloc(mmc_dev(host->mmc), sizeof(*idma), GFP_KERNEL);
	if (!idma)
		return -ENOMEM;

	host->dma_priv = idma;

	if (host->variant->dma_lli) {
		idma->sg_cpu = dmam_alloc_coherent(mmc_dev(host->mmc),
						   SDMMC_LLI_BUF_LEN,
						   &idma->sg_dma, GFP_KERNEL);
		if (!idma->sg_cpu) {
			dev_err(mmc_dev(host->mmc),
				"Failed to alloc IDMA descriptor\n");
			return -ENOMEM;
		}
		host->mmc->max_segs = SDMMC_LLI_BUF_LEN /
			sizeof(struct sdmmc_lli_desc);
		host->mmc->max_seg_size = host->variant->stm32_idmabsize_mask;
	} else {
		host->mmc->max_segs = 1;
		host->mmc->max_seg_size = host->mmc->max_req_size;
	}

	return 0;
}

static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
{
	struct sdmmc_idma *idma = host->dma_priv;
	struct sdmmc_lli_desc *desc = (struct sdmmc_lli_desc *)idma->sg_cpu;
	struct mmc_data *data = host->data;
	struct scatterlist *sg;
	int i;

	if (!host->variant->dma_lli || data->sg_len == 1) {
		writel_relaxed(sg_dma_address(data->sg),
			       host->base + MMCI_STM32_IDMABASE0R);
		writel_relaxed(MMCI_STM32_IDMAEN,
			       host->base + MMCI_STM32_IDMACTRLR);
		return 0;
	}

	for_each_sg(data->sg, sg, data->sg_len, i) {
		desc[i].idmalar = (i + 1) * sizeof(struct sdmmc_lli_desc);
		desc[i].idmalar |= MMCI_STM32_ULA | MMCI_STM32_ULS
			| MMCI_STM32_ABR;
		desc[i].idmabase = sg_dma_address(sg);
		desc[i].idmasize = sg_dma_len(sg);
	}

	/* mark the end of the linked list */
	desc[data->sg_len - 1].idmalar &= ~MMCI_STM32_ULA;

	dma_wmb();
	writel_relaxed(idma->sg_dma, host->base + MMCI_STM32_IDMABAR);
	writel_relaxed(desc[0].idmalar, host->base + MMCI_STM32_IDMALAR);
	writel_relaxed(desc[0].idmabase, host->base + MMCI_STM32_IDMABASE0R);
	writel_relaxed(desc[0].idmasize, host->base + MMCI_STM32_IDMABSIZER);
	writel_relaxed(MMCI_STM32_IDMAEN | MMCI_STM32_IDMALLIEN,
		       host->base + MMCI_STM32_IDMACTRLR);

	return 0;
}
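
/*
 * Programming model (as set up above): the first LLI descriptor is
 * mirrored into the IDMALAR/IDMABASE0R/IDMABSIZER registers, IDMABAR
 * points at the descriptor table, and the controller then follows the
 * idmalar links on its own until a descriptor without MMCI_STM32_ULA
 * ends the chain.
 */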

static void sdmmc_idma_finalize(struct mmci_host *host, struct mmc_data *data)
{
	writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
}

static void mmci_sdmmc_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	unsigned int clk = 0, ddr = 0;

	if (host->mmc->ios.timing == MMC_TIMING_MMC_DDR52 ||
	    host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
		ddr = MCI_STM32_CLK_DDR;

	/*
	 * cclk = mclk / (2 * clkdiv)
	 * clkdiv 0 => bypass
	 * in DDR mode, bypass is not possible
	 */
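	/*
	 * Worked example (illustrative): with mclk = 48 MHz and a desired
	 * rate of 400 kHz, clkdiv = DIV_ROUND_UP(48000000, 2 * 400000) = 60
	 * and cclk = 48000000 / (2 * 60) = 400 kHz; rounding the divider up
	 * keeps cclk at or below the requested rate.
	 */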
	if (desired) {
		if (desired >= host->mclk && !ddr) {
			host->cclk = host->mclk;
		} else {
			clk = DIV_ROUND_UP(host->mclk, 2 * desired);
			if (clk > MCI_STM32_CLK_CLKDIV_MSK)
				clk = MCI_STM32_CLK_CLKDIV_MSK;
			host->cclk = host->mclk / (2 * clk);
		}
	} else {
		/*
		 * During the power-on phase the clock cannot be set to 0:
		 * only power-off and power-cycle deactivate the clock.
		 * If the desired clock is 0, set the maximum divider.
		 */
		clk = MCI_STM32_CLK_CLKDIV_MSK;
		host->cclk = host->mclk / (2 * clk);
	}

	/* Set actual clock for debug */
	if (host->mmc->ios.power_mode == MMC_POWER_ON)
		host->mmc->actual_clock = host->cclk;
	else
		host->mmc->actual_clock = 0;

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_STM32_CLK_WIDEBUS_4;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= MCI_STM32_CLK_WIDEBUS_8;

	clk |= MCI_STM32_CLK_HWFCEN;
	clk |= host->clk_reg_add;
	clk |= ddr;

	/*
	 * SDMMC_FBCK is selected when an external Delay Block is needed
	 * with SDR104.
	 */
	if (host->mmc->ios.timing >= MMC_TIMING_UHS_SDR50) {
		clk |= MCI_STM32_CLK_BUSSPEED;
		if (host->mmc->ios.timing == MMC_TIMING_UHS_SDR104) {
			clk &= ~MCI_STM32_CLK_SEL_MSK;
			clk |= MCI_STM32_CLK_SELFBCK;
		}
	}

	mmci_write_clkreg(host, clk);
}

static void sdmmc_dlyb_input_ck(struct sdmmc_dlyb *dlyb)
{
	if (!dlyb || !dlyb->base)
		return;

	/* Output clock = Input clock */
	writel_relaxed(0, dlyb->base + DLYB_CR);
}

static void mmci_sdmmc_set_pwrreg(struct mmci_host *host, unsigned int pwr)
{
	struct mmc_ios ios = host->mmc->ios;
	struct sdmmc_dlyb *dlyb = host->variant_priv;

	/* add the DT (OF) options */
	pwr = host->pwr_reg_add;

	sdmmc_dlyb_input_ck(dlyb);

	if (ios.power_mode == MMC_POWER_OFF) {
		/* Only a reset can power off the sdmmc */
		reset_control_assert(host->rst);
		udelay(2);
		reset_control_deassert(host->rst);

		/*
		 * Set the SDMMC in Power-cycle state.
		 * This ensures that SDMMC_D[7:0], SDMMC_CMD and SDMMC_CK
		 * are driven low, to prevent the card from being supplied
		 * through the signal lines.
		 */
		mmci_write_pwrreg(host, MCI_STM32_PWR_CYC | pwr);
	} else if (ios.power_mode == MMC_POWER_ON) {
		/*
		 * After power-off (reset), the irq mask defined in the
		 * probe function is lost, so the default irq mask (from
		 * probe) must be re-enabled.
		 */
		writel(MCI_IRQENABLE | host->variant->start_err,
		       host->base + MMCIMASK0);

		/* preserve the voltage switch bits */
		pwr |= host->pwr_reg & (MCI_STM32_VSWITCHEN |
					MCI_STM32_VSWITCH);

		/*
		 * After a power-cycle state, we must set the SDMMC in
		 * Power-off: SDMMC_D[7:0], SDMMC_CMD and SDMMC_CK are
		 * driven high. Then we can set the SDMMC to Power-on state.
		 */
		mmci_write_pwrreg(host, MCI_PWR_OFF | pwr);
		mdelay(1);
		mmci_write_pwrreg(host, MCI_PWR_ON | pwr);
	}
}

static u32 sdmmc_get_dctrl_cfg(struct mmci_host *host)
{
	u32 datactrl;

	datactrl = mmci_dctrl_blksz(host);

	if (host->mmc->card && mmc_card_sdio(host->mmc->card) &&
	    host->data->blocks == 1)
		datactrl |= MCI_DPSM_STM32_MODE_SDIO;
	else if (host->data->stop && !host->mrq->sbc)
		datactrl |= MCI_DPSM_STM32_MODE_BLOCK_STOP;
	else
		datactrl |= MCI_DPSM_STM32_MODE_BLOCK;

	return datactrl;
}

static bool sdmmc_busy_complete(struct mmci_host *host, u32 status, u32 err_msk)
{
	void __iomem *base = host->base;
	u32 busy_d0, busy_d0end, mask, sdmmc_status;

	mask = readl_relaxed(base + MMCIMASK0);
	sdmmc_status = readl_relaxed(base + MMCISTATUS);
	busy_d0end = sdmmc_status & MCI_STM32_BUSYD0END;
	busy_d0 = sdmmc_status & MCI_STM32_BUSYD0;

	/* complete if there is an error or busy_d0end */
	if ((status & err_msk) || busy_d0end)
		goto complete;

	/*
	 * On response, the card busy signaling is reflected in the BUSYD0
	 * flag. If busy_d0 is in progress, we must enable the busyd0end
	 * interrupt to wait for this completion; otherwise this request
	 * has no busy step.
	 */
	if (busy_d0) {
		if (!host->busy_status) {
			writel_relaxed(mask | host->variant->busy_detect_mask,
				       base + MMCIMASK0);
			host->busy_status = status &
				(MCI_CMDSENT | MCI_CMDRESPEND);
		}
		return false;
	}

complete:
	if (host->busy_status) {
		writel_relaxed(mask & ~host->variant->busy_detect_mask,
			       base + MMCIMASK0);
		host->busy_status = 0;
	}

	writel_relaxed(host->variant->busy_detect_mask, base + MMCICLEAR);

	return true;
}
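
/*
 * Flow note: the mmci irq handler invokes this callback repeatedly for
 * a busy-signaling command. The first call with BUSYD0 set arms the
 * busyd0end interrupt and records busy_status; a later call, on the
 * busyd0end event or on an error, disarms it and returns true to
 * complete the request.
 */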

static void sdmmc_dlyb_set_cfgr(struct sdmmc_dlyb *dlyb,
				int unit, int phase, bool sampler)
{
	u32 cfgr;

	writel_relaxed(DLYB_CR_SEN | DLYB_CR_DEN, dlyb->base + DLYB_CR);

	cfgr = FIELD_PREP(DLYB_CFGR_UNIT_MASK, unit) |
	       FIELD_PREP(DLYB_CFGR_SEL_MASK, phase);
	writel_relaxed(cfgr, dlyb->base + DLYB_CFGR);

	if (!sampler)
		writel_relaxed(DLYB_CR_DEN, dlyb->base + DLYB_CR);
}
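
/*
 * Assumed semantics of the sampler flag: when true, the delay line is
 * left with both DEN and SEN set so its length sampler keeps running
 * (used by the unit sweep below); when false, only DEN is kept set for
 * normal operation at the selected phase.
 */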

static int sdmmc_dlyb_lng_tuning(struct mmci_host *host)
{
	struct sdmmc_dlyb *dlyb = host->variant_priv;
	u32 cfgr;
	int i, lng, ret;

	for (i = 0; i <= DLYB_CFGR_UNIT_MAX; i++) {
		sdmmc_dlyb_set_cfgr(dlyb, i, DLYB_CFGR_SEL_MAX, true);

		ret = readl_relaxed_poll_timeout(dlyb->base + DLYB_CFGR, cfgr,
						 (cfgr & DLYB_CFGR_LNGF),
						 1, DLYB_LNG_TIMEOUT_US);
		if (ret) {
			dev_warn(mmc_dev(host->mmc),
				 "delay line cfg timeout unit:%d cfgr:%d\n",
				 i, cfgr);
			continue;
		}

		lng = FIELD_GET(DLYB_CFGR_LNG_MASK, cfgr);
		if (lng < BIT(DLYB_NB_DELAY) && lng > 0)
			break;
	}

	if (i > DLYB_CFGR_UNIT_MAX)
		return -EINVAL;

	dlyb->unit = i;
	dlyb->max = __fls(lng);

	return 0;
}
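
/*
 * Illustrative reading (assumed register value): if the LNG field
 * latches as 0b000000111111, six delay taps fit in the sampled clock
 * period; __fls(0x3f) = 5, so the phase sweep below covers SEL values
 * 0..5.
 */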

static int sdmmc_dlyb_phase_tuning(struct mmci_host *host, u32 opcode)
{
	struct sdmmc_dlyb *dlyb = host->variant_priv;
	int cur_len = 0, max_len = 0, end_of_len = 0;
	int phase;

	for (phase = 0; phase <= dlyb->max; phase++) {
		sdmmc_dlyb_set_cfgr(dlyb, dlyb->unit, phase, false);

		if (mmc_send_tuning(host->mmc, opcode, NULL)) {
			cur_len = 0;
		} else {
			cur_len++;
			if (cur_len > max_len) {
				max_len = cur_len;
				end_of_len = phase;
			}
		}
	}

	if (!max_len) {
		dev_err(mmc_dev(host->mmc), "no tuning point found\n");
		return -EINVAL;
	}

	phase = end_of_len - max_len / 2;
	sdmmc_dlyb_set_cfgr(dlyb, dlyb->unit, phase, false);

	dev_dbg(mmc_dev(host->mmc), "unit:%d max_dly:%d phase:%d\n",
		dlyb->unit, dlyb->max, phase);

	return 0;
}
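
/*
 * Worked example (illustrative): if phases 3..8 pass tuning, then
 * max_len = 6 and end_of_len = 8, so the retained phase is
 * 8 - 6 / 2 = 5, the midpoint of the longest passing window.
 */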

static int sdmmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct sdmmc_dlyb *dlyb = host->variant_priv;

	if (!dlyb || !dlyb->base)
		return -EINVAL;

	if (sdmmc_dlyb_lng_tuning(host))
		return -EINVAL;

	return sdmmc_dlyb_phase_tuning(host, opcode);
}

static void sdmmc_pre_sig_volt_vswitch(struct mmci_host *host)
{
	/* clear the voltage switch completion flag */
	writel_relaxed(MCI_STM32_VSWENDC, host->base + MMCICLEAR);
	/* enable the voltage switch procedure */
	mmci_write_pwrreg(host, host->pwr_reg | MCI_STM32_VSWITCHEN);
}

static int sdmmc_post_sig_volt_switch(struct mmci_host *host,
				      struct mmc_ios *ios)
{
	unsigned long flags;
	u32 status;
	int ret = 0;

	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
		spin_lock_irqsave(&host->lock, flags);
		mmci_write_pwrreg(host, host->pwr_reg | MCI_STM32_VSWITCH);
		spin_unlock_irqrestore(&host->lock, flags);

		/* wait up to 10 ms for the voltage switch to complete */
		ret = readl_relaxed_poll_timeout(host->base + MMCISTATUS,
						 status,
						 (status & MCI_STM32_VSWEND),
						 10, SDMMC_VSWEND_TIMEOUT_US);

		writel_relaxed(MCI_STM32_VSWENDC | MCI_STM32_CKSTOPC,
			       host->base + MMCICLEAR);
		mmci_write_pwrreg(host, host->pwr_reg &
				  ~(MCI_STM32_VSWITCHEN | MCI_STM32_VSWITCH));
	}

	return ret;
}

static struct mmci_host_ops sdmmc_variant_ops = {
	.validate_data = sdmmc_idma_validate_data,
	.prep_data = sdmmc_idma_prep_data,
	.unprep_data = sdmmc_idma_unprep_data,
	.get_datactrl_cfg = sdmmc_get_dctrl_cfg,
	.dma_setup = sdmmc_idma_setup,
	.dma_start = sdmmc_idma_start,
	.dma_finalize = sdmmc_idma_finalize,
	.set_clkreg = mmci_sdmmc_set_clkreg,
	.set_pwrreg = mmci_sdmmc_set_pwrreg,
	.busy_complete = sdmmc_busy_complete,
	.pre_sig_volt_switch = sdmmc_pre_sig_volt_vswitch,
	.post_sig_volt_switch = sdmmc_post_sig_volt_switch,
};

void sdmmc_variant_init(struct mmci_host *host)
{
	struct device_node *np = host->mmc->parent->of_node;
	void __iomem *base_dlyb;
	struct sdmmc_dlyb *dlyb;

	host->ops = &sdmmc_variant_ops;

	base_dlyb = devm_of_iomap(mmc_dev(host->mmc), np, 1, NULL);
	if (IS_ERR(base_dlyb))
		return;

	dlyb = devm_kzalloc(mmc_dev(host->mmc), sizeof(*dlyb), GFP_KERNEL);
	if (!dlyb)
		return;

	dlyb->base = base_dlyb;
	host->variant_priv = dlyb;
	host->mmc_ops->execute_tuning = sdmmc_execute_tuning;
}