Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v5.12-rc2 1136 lines 29 kB view raw
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
 * Copyright (C) 2013, Imagination Technologies
 *
 * JZ4740 SD/MMC controller driver
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>

/* MSC controller register offsets, relative to the MMIO resource base. */
#define JZ_REG_MMC_STRPCL	0x00
#define JZ_REG_MMC_STATUS	0x04
#define JZ_REG_MMC_CLKRT	0x08
#define JZ_REG_MMC_CMDAT	0x0C
#define JZ_REG_MMC_RESTO	0x10
#define JZ_REG_MMC_RDTO		0x14
#define JZ_REG_MMC_BLKLEN	0x18
#define JZ_REG_MMC_NOB		0x1C
#define JZ_REG_MMC_SNOB		0x20
#define JZ_REG_MMC_IMASK	0x24
#define JZ_REG_MMC_IREG		0x28
#define JZ_REG_MMC_CMD		0x2C
#define JZ_REG_MMC_ARG		0x30
#define JZ_REG_MMC_RESP_FIFO	0x34
#define JZ_REG_MMC_RXFIFO	0x38
#define JZ_REG_MMC_TXFIFO	0x3C
#define JZ_REG_MMC_LPM		0x40
#define JZ_REG_MMC_DMAC		0x44

/* STRPCL (start/stop clock & operations control) register bits. */
#define JZ_MMC_STRPCL_EXIT_MULTIPLE BIT(7)
#define JZ_MMC_STRPCL_EXIT_TRANSFER BIT(6)
#define JZ_MMC_STRPCL_START_READWAIT BIT(5)
#define JZ_MMC_STRPCL_STOP_READWAIT BIT(4)
#define JZ_MMC_STRPCL_RESET BIT(3)
#define JZ_MMC_STRPCL_START_OP BIT(2)
#define JZ_MMC_STRPCL_CLOCK_CONTROL (BIT(1) | BIT(0))
#define JZ_MMC_STRPCL_CLOCK_STOP BIT(0)
#define JZ_MMC_STRPCL_CLOCK_START BIT(1)


/* STATUS register bits. */
#define JZ_MMC_STATUS_IS_RESETTING BIT(15)
#define JZ_MMC_STATUS_SDIO_INT_ACTIVE BIT(14)
#define JZ_MMC_STATUS_PRG_DONE BIT(13)
#define JZ_MMC_STATUS_DATA_TRAN_DONE BIT(12)
#define JZ_MMC_STATUS_END_CMD_RES BIT(11)
#define JZ_MMC_STATUS_DATA_FIFO_AFULL BIT(10)
#define JZ_MMC_STATUS_IS_READWAIT BIT(9)
#define JZ_MMC_STATUS_CLK_EN BIT(8)
#define JZ_MMC_STATUS_DATA_FIFO_FULL BIT(7)
#define JZ_MMC_STATUS_DATA_FIFO_EMPTY BIT(6)
#define JZ_MMC_STATUS_CRC_RES_ERR BIT(5)
#define JZ_MMC_STATUS_CRC_READ_ERROR BIT(4)
#define JZ_MMC_STATUS_TIMEOUT_WRITE BIT(3)
#define JZ_MMC_STATUS_CRC_WRITE_ERROR BIT(2)
#define JZ_MMC_STATUS_TIMEOUT_RES BIT(1)
#define JZ_MMC_STATUS_TIMEOUT_READ BIT(0)

/* Aggregate error masks: CRC_READ_ERROR|TIMEOUT_READ, TIMEOUT_WRITE|CRC_WRITE_ERROR. */
#define JZ_MMC_STATUS_READ_ERROR_MASK (BIT(4) | BIT(0))
#define JZ_MMC_STATUS_WRITE_ERROR_MASK (BIT(3) | BIT(2))


/* CMDAT (command/data control) register bits. */
#define JZ_MMC_CMDAT_IO_ABORT BIT(11)
#define JZ_MMC_CMDAT_BUS_WIDTH_4BIT BIT(10)
#define JZ_MMC_CMDAT_BUS_WIDTH_8BIT (BIT(10) | BIT(9))
#define JZ_MMC_CMDAT_BUS_WIDTH_MASK (BIT(10) | BIT(9))
#define JZ_MMC_CMDAT_DMA_EN BIT(8)
#define JZ_MMC_CMDAT_INIT BIT(7)
#define JZ_MMC_CMDAT_BUSY BIT(6)
#define JZ_MMC_CMDAT_STREAM BIT(5)
#define JZ_MMC_CMDAT_WRITE BIT(4)
#define JZ_MMC_CMDAT_DATA_EN BIT(3)
#define JZ_MMC_CMDAT_RESPONSE_FORMAT (BIT(2) | BIT(1) | BIT(0))
#define JZ_MMC_CMDAT_RSP_R1 1 /* Short response (also R1b/R4..R7 formats) */
#define JZ_MMC_CMDAT_RSP_R2 2 /* Long (136-bit) response */
#define JZ_MMC_CMDAT_RSP_R3 3 /* Short response, no CRC check */

/* IMASK/IREG interrupt bits. */
#define JZ_MMC_IRQ_SDIO BIT(7)
#define JZ_MMC_IRQ_TXFIFO_WR_REQ BIT(6)
#define JZ_MMC_IRQ_RXFIFO_RD_REQ BIT(5)
#define JZ_MMC_IRQ_END_CMD_RES BIT(2)
#define JZ_MMC_IRQ_PRG_DONE BIT(1)
#define JZ_MMC_IRQ_DATA_TRAN_DONE BIT(0)

/* DMAC register bits (JZ4780 and later only). */
#define JZ_MMC_DMAC_DMA_SEL BIT(1)
#define JZ_MMC_DMAC_DMA_EN BIT(0)

/* LPM (low power mode / sampling delay) register bits. */
#define	JZ_MMC_LPM_DRV_RISING BIT(31)
#define	JZ_MMC_LPM_DRV_RISING_QTR_PHASE_DLY BIT(31)
#define	JZ_MMC_LPM_DRV_RISING_1NS_DLY BIT(30)
#define	JZ_MMC_LPM_SMP_RISING_QTR_OR_HALF_PHASE_DLY BIT(29)
#define	JZ_MMC_LPM_LOW_POWER_MODE_EN BIT(0)

#define JZ_MMC_CLK_RATE 24000000
#define JZ_MMC_REQ_TIMEOUT_MS 5000

/* Supported SoC generations; ordering matters for the >= feature checks below. */
enum jz4740_mmc_version {
	JZ_MMC_JZ4740,
	JZ_MMC_JZ4725B,
	JZ_MMC_JZ4760,
	JZ_MMC_JZ4780,
	JZ_MMC_X1000,
};

/* Per-request state machine driven by jz_mmc_irq_worker(). */
enum jz4740_mmc_state {
	JZ4740_MMC_STATE_READ_RESPONSE,
	JZ4740_MMC_STATE_TRANSFER_DATA,
	JZ4740_MMC_STATE_SEND_STOP,
	JZ4740_MMC_STATE_DONE,
};

/*
 * The MMC core allows to prepare a mmc_request while another mmc_request
 * is in-flight. This is used via the pre_req/post_req hooks.
 * This driver uses the pre_req/post_req hooks to map/unmap the mmc_request.
 * Following what other drivers do (sdhci, dw_mmc) we use the following cookie
 * flags to keep track of the mmc_request mapping state.
 *
 * COOKIE_UNMAPPED: the request is not mapped.
 * COOKIE_PREMAPPED: the request was mapped in pre_req,
 * and should be unmapped in post_req.
 * COOKIE_MAPPED: the request was mapped in the irq handler,
 * and should be unmapped before mmc_request_done is called..
 */
enum jz4780_cookie {
	COOKIE_UNMAPPED = 0,
	COOKIE_PREMAPPED,
	COOKIE_MAPPED,
};

struct jz4740_mmc_host {
	struct mmc_host *mmc;
	struct platform_device *pdev;
	struct clk *clk;

	enum jz4740_mmc_version version;

	int irq;

	void __iomem *base;
	struct resource *mem_res;
	struct mmc_request *req;	/* in-flight request, NULL when idle */
	struct mmc_command *cmd;

	/* bit 0 is set while a request awaits an IRQ or the timeout timer */
	unsigned long waiting;

	/* sticky CMDAT bits (bus width, INIT) merged into every command */
	uint32_t cmdat;

	/* cached IMASK value; bits set = IRQ disabled */
	uint32_t irq_mask;

	spinlock_t lock;

	struct timer_list timeout_timer;
	struct sg_mapping_iter miter;	/* PIO scatterlist iterator */
	enum jz4740_mmc_state state;

	/* DMA support */
	struct dma_chan *dma_rx;
	struct dma_chan *dma_tx;
	bool use_dma;

/* The DMA trigger level is 8 words, that is to say, the DMA read
 * trigger is when data words in MSC_RXFIFO is >= 8 and the DMA write
 * trigger is when data words in MSC_TXFIFO is < 8.
 */
#define JZ4740_MMC_FIFO_HALF_SIZE 8
};

/* IMASK is 32-bit from the JZ4725B onwards; the original JZ4740 has 16 bits. */
static void jz4740_mmc_write_irq_mask(struct jz4740_mmc_host *host,
				      uint32_t val)
{
	if (host->version >= JZ_MMC_JZ4725B)
		return writel(val, host->base + JZ_REG_MMC_IMASK);
	else
		return writew(val, host->base + JZ_REG_MMC_IMASK);
}

/* IREG is 32-bit from the JZ4780 onwards; earlier SoCs have a 16-bit IREG. */
static void jz4740_mmc_write_irq_reg(struct jz4740_mmc_host *host,
				     uint32_t val)
{
	if (host->version >= JZ_MMC_JZ4780)
		writel(val, host->base + JZ_REG_MMC_IREG);
	else
		writew(val, host->base + JZ_REG_MMC_IREG);
}

static uint32_t jz4740_mmc_read_irq_reg(struct jz4740_mmc_host *host)
{
	if (host->version >= JZ_MMC_JZ4780)
		return readl(host->base + JZ_REG_MMC_IREG);
	else
		return readw(host->base + JZ_REG_MMC_IREG);
}

/*----------------------------------------------------------------------------*/
/* DMA infrastructure */

static void jz4740_mmc_release_dma_channels(struct jz4740_mmc_host *host)
{
	if (!host->use_dma)
		return;

	dma_release_channel(host->dma_tx);
	dma_release_channel(host->dma_rx);
}

/* Request the "tx"/"rx" dmaengine channels; releases tx if rx fails. */
static int jz4740_mmc_acquire_dma_channels(struct jz4740_mmc_host *host)
{
	host->dma_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
	if (IS_ERR(host->dma_tx)) {
		dev_err(mmc_dev(host->mmc), "Failed to get dma_tx channel\n");
		return PTR_ERR(host->dma_tx);
	}

	host->dma_rx = dma_request_chan(mmc_dev(host->mmc), "rx");
	if (IS_ERR(host->dma_rx)) {
		dev_err(mmc_dev(host->mmc), "Failed to get dma_rx channel\n");
		dma_release_channel(host->dma_tx);
		return PTR_ERR(host->dma_rx);
	}

	return 0;
}

static inline struct dma_chan *jz4740_mmc_get_dma_chan(struct jz4740_mmc_host *host,
						       struct mmc_data *data)
{
	return (data->flags & MMC_DATA_READ) ?
	       host->dma_rx : host->dma_tx;
}

static void jz4740_mmc_dma_unmap(struct jz4740_mmc_host *host,
				 struct mmc_data *data)
{
	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
	enum dma_data_direction dir = mmc_get_dma_dir(data);

	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
	data->host_cookie = COOKIE_UNMAPPED;
}

/* Prepares DMA data for current or next transfer.
 * A request can be in-flight when this is called.
 * Returns the mapped sg count, or -EINVAL if mapping failed.
 */
static int jz4740_mmc_prepare_dma_data(struct jz4740_mmc_host *host,
				       struct mmc_data *data,
				       int cookie)
{
	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
	enum dma_data_direction dir = mmc_get_dma_dir(data);
	int sg_count;

	/* Already mapped by the pre_req hook: reuse that mapping. */
	if (data->host_cookie == COOKIE_PREMAPPED)
		return data->sg_count;

	sg_count = dma_map_sg(chan->device->dev,
			      data->sg,
			      data->sg_len,
			      dir);

	if (sg_count <= 0) {
		dev_err(mmc_dev(host->mmc),
			"Failed to map scatterlist for DMA operation\n");
		return -EINVAL;
	}

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return data->sg_count;
}

/* Configure the slave channel for the data direction, then submit and
 * issue the descriptor. Returns 0 on success or a negative errno.
 */
static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host,
					 struct mmc_data *data)
{
	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config conf = {
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
		.dst_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
	};
	int sg_count;

	if (data->flags & MMC_DATA_WRITE) {
		conf.direction = DMA_MEM_TO_DEV;
		conf.dst_addr = host->mem_res->start + JZ_REG_MMC_TXFIFO;
	} else {
		conf.direction = DMA_DEV_TO_MEM;
		conf.src_addr = host->mem_res->start + JZ_REG_MMC_RXFIFO;
	}

	sg_count = jz4740_mmc_prepare_dma_data(host, data, COOKIE_MAPPED);
	if (sg_count < 0)
		return sg_count;

	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan, data->sg, sg_count,
				       conf.direction,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(mmc_dev(host->mmc),
			"Failed to allocate DMA %s descriptor",
			conf.direction == DMA_MEM_TO_DEV ? "TX" : "RX");
		goto dma_unmap;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return 0;

dma_unmap:
	/* Only unmap if we mapped it here; a premapped sg stays mapped. */
	if (data->host_cookie == COOKIE_MAPPED)
		jz4740_mmc_dma_unmap(host, data);
	return -ENOMEM;
}

/* mmc_host_ops.pre_req: map the next request's sg list ahead of time. */
static void jz4740_mmc_pre_request(struct mmc_host *mmc,
				   struct mmc_request *mrq)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!host->use_dma)
		return;

	data->host_cookie = COOKIE_UNMAPPED;
	if (jz4740_mmc_prepare_dma_data(host, data, COOKIE_PREMAPPED) < 0)
		data->host_cookie = COOKIE_UNMAPPED;
}

/* mmc_host_ops.post_req: unmap, and on error abort the DMA channel. */
static void jz4740_mmc_post_request(struct mmc_host *mmc,
				    struct mmc_request *mrq,
				    int err)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (data && data->host_cookie != COOKIE_UNMAPPED)
		jz4740_mmc_dma_unmap(host, data);

	if (err) {
		struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);

		dmaengine_terminate_all(chan);
	}
}

/*----------------------------------------------------------------------------*/

/* Enable/disable bits in the cached IRQ mask under the host lock.
 * A set bit in IMASK masks (disables) the corresponding interrupt.
 */
static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host,
				       unsigned int irq, bool enabled)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (enabled)
		host->irq_mask &= ~irq;
	else
		host->irq_mask |= irq;

	jz4740_mmc_write_irq_mask(host, host->irq_mask);
	spin_unlock_irqrestore(&host->lock, flags);
}

static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host,
				    bool start_transfer)
{
	uint16_t val =
		JZ_MMC_STRPCL_CLOCK_START;

	if (start_transfer)
		val |= JZ_MMC_STRPCL_START_OP;

	writew(val, host->base + JZ_REG_MMC_STRPCL);
}

/* Stop the card clock and busy-wait (bounded) until CLK_EN deasserts. */
static void jz4740_mmc_clock_disable(struct jz4740_mmc_host *host)
{
	uint32_t status;
	unsigned int timeout = 1000;

	writew(JZ_MMC_STRPCL_CLOCK_STOP, host->base + JZ_REG_MMC_STRPCL);
	do {
		status = readl(host->base + JZ_REG_MMC_STATUS);
	} while (status & JZ_MMC_STATUS_CLK_EN && --timeout);
}

/* Soft-reset the controller and wait (bounded) for IS_RESETTING to clear. */
static void jz4740_mmc_reset(struct jz4740_mmc_host *host)
{
	uint32_t status;
	unsigned int timeout = 1000;

	writew(JZ_MMC_STRPCL_RESET, host->base + JZ_REG_MMC_STRPCL);
	udelay(10);
	do {
		status = readl(host->base + JZ_REG_MMC_STATUS);
	} while (status & JZ_MMC_STATUS_IS_RESETTING && --timeout);
}

/* Finish the current request: unmap irq-handler-mapped DMA data and
 * hand the request back to the MMC core.
 */
static void jz4740_mmc_request_done(struct jz4740_mmc_host *host)
{
	struct mmc_request *req;
	struct mmc_data *data;

	req = host->req;
	data = req->data;
	host->req = NULL;

	if (data && data->host_cookie == COOKIE_MAPPED)
		jz4740_mmc_dma_unmap(host, data);
	mmc_request_done(host->mmc, req);
}

/* Poll IREG for an IRQ bit. Returns true (as unsigned int) when the poll
 * budget is exhausted; in that case the interrupt is enabled and the
 * timeout timer re-armed so the threaded handler resumes asynchronously.
 */
static unsigned int jz4740_mmc_poll_irq(struct jz4740_mmc_host *host,
					unsigned int irq)
{
	unsigned int timeout = 0x800;
	uint32_t status;

	do {
		status = jz4740_mmc_read_irq_reg(host);
	} while (!(status & irq) && --timeout);

	if (timeout == 0) {
		set_bit(0, &host->waiting);
		mod_timer(&host->timeout_timer,
			  jiffies + msecs_to_jiffies(JZ_MMC_REQ_TIMEOUT_MS));
		jz4740_mmc_set_irq_enabled(host, irq, true);
		return true;
	}

	return false;
}

/* Translate STATUS error bits into cmd->error / data->error codes. */
static void jz4740_mmc_transfer_check_state(struct jz4740_mmc_host *host,
					    struct mmc_data *data)
{
	int status;

	status = readl(host->base + JZ_REG_MMC_STATUS);
	if (status & JZ_MMC_STATUS_WRITE_ERROR_MASK) {
		if (status & (JZ_MMC_STATUS_TIMEOUT_WRITE)) {
			host->req->cmd->error = -ETIMEDOUT;
			data->error = -ETIMEDOUT;
		} else {
			host->req->cmd->error = -EIO;
			data->error = -EIO;
		}
	} else if (status & JZ_MMC_STATUS_READ_ERROR_MASK) {
		if (status & (JZ_MMC_STATUS_TIMEOUT_READ)) {
			host->req->cmd->error = -ETIMEDOUT;
			data->error = -ETIMEDOUT;
		} else {
			host->req->cmd->error = -EIO;
			data->error = -EIO;
		}
	}
}

/* PIO write path: feed the TX FIFO in 8-word bursts (the FIFO trigger
 * level), then the remainder word-by-word. Returns true on poll timeout,
 * leaving the sg_miter positioned so the transfer can be resumed.
 */
static bool jz4740_mmc_write_data(struct jz4740_mmc_host *host,
				  struct mmc_data *data)
{
	struct sg_mapping_iter *miter = &host->miter;
	void __iomem *fifo_addr = host->base + JZ_REG_MMC_TXFIFO;
	uint32_t *buf;
	bool timeout;
	size_t i, j;

	while (sg_miter_next(miter)) {
		buf = miter->addr;
		i = miter->length / 4;	/* length in 32-bit words */
		j = i / 8;		/* full 8-word bursts */
		i = i & 0x7;		/* leftover words */
		while (j) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			writel(buf[0], fifo_addr);
			writel(buf[1], fifo_addr);
			writel(buf[2], fifo_addr);
			writel(buf[3], fifo_addr);
			writel(buf[4], fifo_addr);
			writel(buf[5], fifo_addr);
			writel(buf[6], fifo_addr);
			writel(buf[7], fifo_addr);
			buf += 8;
			--j;
		}
		if (unlikely(i)) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			while (i) {
				writel(*buf, fifo_addr);
				++buf;
				--i;
			}
		}
		data->bytes_xfered += miter->length;
	}
	sg_miter_stop(miter);

	return false;

poll_timeout:
	miter->consumed = (void *)buf - miter->addr;
	data->bytes_xfered += miter->consumed;
	sg_miter_stop(miter);

	return true;
}

/* PIO read path, mirror of jz4740_mmc_write_data(): drain the RX FIFO in
 * 32-byte bursts, handle the sub-word tail, then flush any stray extra
 * word the controller sometimes leaves in the FIFO.
 */
static bool jz4740_mmc_read_data(struct jz4740_mmc_host *host,
				 struct mmc_data *data)
{
	struct sg_mapping_iter *miter = &host->miter;
	void __iomem *fifo_addr = host->base + JZ_REG_MMC_RXFIFO;
	uint32_t *buf;
	uint32_t d;
	uint32_t status;
	size_t i, j;
	unsigned int timeout;

	while (sg_miter_next(miter)) {
		buf = miter->addr;
		i = miter->length;	/* length in bytes here */
		j = i / 32;		/* full 8-word (32-byte) bursts */
		i = i & 0x1f;		/* leftover bytes */
		while (j) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			buf[0] = readl(fifo_addr);
			buf[1] = readl(fifo_addr);
			buf[2] = readl(fifo_addr);
			buf[3] = readl(fifo_addr);
			buf[4] = readl(fifo_addr);
			buf[5] = readl(fifo_addr);
			buf[6] = readl(fifo_addr);
			buf[7] = readl(fifo_addr);

			buf += 8;
			--j;
		}

		if (unlikely(i)) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			while (i >= 4) {
				*buf++ = readl(fifo_addr);
				i -= 4;
			}
			if (unlikely(i > 0)) {
				/* partial trailing word: copy only i bytes */
				d = readl(fifo_addr);
				memcpy(buf, &d, i);
			}
		}
		data->bytes_xfered += miter->length;

		/* This can go away once MIPS implements
		 * flush_kernel_dcache_page */
		flush_dcache_page(miter->page);
	}
	sg_miter_stop(miter);

	/* For whatever reason there is sometime one word more in the fifo then
	 * requested */
	timeout = 1000;
	status = readl(host->base + JZ_REG_MMC_STATUS);
	while (!(status & JZ_MMC_STATUS_DATA_FIFO_EMPTY) && --timeout) {
		d = readl(fifo_addr);
		status = readl(host->base + JZ_REG_MMC_STATUS);
	}

	return false;

poll_timeout:
	miter->consumed = (void *)buf - miter->addr;
	data->bytes_xfered += miter->consumed;
	sg_miter_stop(miter);

	return true;
}

/* Software timeout: if the request is still marked waiting, fail it. */
static void jz4740_mmc_timeout(struct timer_list *t)
{
	struct jz4740_mmc_host *host = from_timer(host, t, timeout_timer);

	if (!test_and_clear_bit(0, &host->waiting))
		return;

	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, false);

	host->req->cmd->error = -ETIMEDOUT;
	jz4740_mmc_request_done(host);
}

/* Reassemble the response from the 16-bit RESP FIFO into cmd->resp[].
 * The FIFO delivers the response high-half-word first.
 */
static void jz4740_mmc_read_response(struct jz4740_mmc_host *host,
				     struct mmc_command *cmd)
{
	int i;
	uint16_t tmp;
	void __iomem *fifo_addr = host->base + JZ_REG_MMC_RESP_FIFO;

	if (cmd->flags & MMC_RSP_136) {
		tmp = readw(fifo_addr);
		for (i = 0; i < 4; ++i) {
			cmd->resp[i] = tmp << 24;
			tmp = readw(fifo_addr);
			cmd->resp[i] |= tmp << 8;
			tmp = readw(fifo_addr);
			cmd->resp[i] |= tmp >> 8;
		}
	} else {
		cmd->resp[0] = readw(fifo_addr) << 24;
		cmd->resp[0] |= readw(fifo_addr) << 8;
		cmd->resp[0] |= readw(fifo_addr) & 0xff;
	}
}

/* Program CMD/ARG/CMDAT (and block geometry for data commands) and
 * start the clock/operation. The clock is stopped while programming.
 */
static void jz4740_mmc_send_command(struct jz4740_mmc_host *host,
				    struct mmc_command *cmd)
{
	uint32_t cmdat = host->cmdat;

	/* INIT (80-clock init sequence) is one-shot after power-up. */
	host->cmdat &= ~JZ_MMC_CMDAT_INIT;
	jz4740_mmc_clock_disable(host);

	host->cmd = cmd;

	if (cmd->flags & MMC_RSP_BUSY)
		cmdat |= JZ_MMC_CMDAT_BUSY;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_R1B:
	case MMC_RSP_R1:
		cmdat |= JZ_MMC_CMDAT_RSP_R1;
		break;
	case MMC_RSP_R2:
		cmdat |= JZ_MMC_CMDAT_RSP_R2;
		break;
	case MMC_RSP_R3:
		cmdat |= JZ_MMC_CMDAT_RSP_R3;
		break;
	default:
		break;
	}

	if (cmd->data) {
		cmdat |= JZ_MMC_CMDAT_DATA_EN;
		if (cmd->data->flags & MMC_DATA_WRITE)
			cmdat |= JZ_MMC_CMDAT_WRITE;
		if (host->use_dma) {
			/*
			 * The 4780's MMC controller has integrated DMA ability
			 * in addition to being able to use the external DMA
			 * controller. It moves DMA control bits to a separate
			 * register. The DMA_SEL bit chooses the external
			 * controller over the integrated one. Earlier SoCs
			 * can only use the external controller, and have a
			 * single DMA enable bit in CMDAT.
			 */
			if (host->version >= JZ_MMC_JZ4780) {
				writel(JZ_MMC_DMAC_DMA_EN | JZ_MMC_DMAC_DMA_SEL,
				       host->base + JZ_REG_MMC_DMAC);
			} else {
				cmdat |= JZ_MMC_CMDAT_DMA_EN;
			}
		} else if (host->version >= JZ_MMC_JZ4780) {
			writel(0, host->base + JZ_REG_MMC_DMAC);
		}

		writew(cmd->data->blksz, host->base + JZ_REG_MMC_BLKLEN);
		writew(cmd->data->blocks, host->base + JZ_REG_MMC_NOB);
	}

	writeb(cmd->opcode, host->base + JZ_REG_MMC_CMD);
	writel(cmd->arg, host->base + JZ_REG_MMC_ARG);
	writel(cmdat, host->base + JZ_REG_MMC_CMDAT);

	jz4740_mmc_clock_enable(host, 1);
}

/* Start the sg_miter for a PIO transfer in the direction given by the
 * data flags.
 */
static void jz_mmc_prepare_data_transfer(struct jz4740_mmc_host *host)
{
	struct mmc_command *cmd = host->req->cmd;
	struct mmc_data *data = cmd->data;
	int direction;

	if (data->flags & MMC_DATA_READ)
		direction = SG_MITER_TO_SG;
	else
		direction = SG_MITER_FROM_SG;

	sg_miter_start(&host->miter, data->sg, data->sg_len, direction);
}


/* Threaded IRQ handler: advances the per-request state machine. Each
 * state may bail out early on a poll timeout, recording where to resume
 * when the next interrupt arrives.
 */
static irqreturn_t jz_mmc_irq_worker(int irq, void *devid)
{
	struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)devid;
	struct mmc_command *cmd = host->req->cmd;
	struct mmc_request *req = host->req;
	struct mmc_data *data = cmd->data;
	bool timeout = false;

	if (cmd->error)
		host->state = JZ4740_MMC_STATE_DONE;

	switch (host->state) {
	case JZ4740_MMC_STATE_READ_RESPONSE:
		if (cmd->flags & MMC_RSP_PRESENT)
			jz4740_mmc_read_response(host, cmd);

		if (!data)
			break;

		jz_mmc_prepare_data_transfer(host);
		fallthrough;

	case JZ4740_MMC_STATE_TRANSFER_DATA:
		if (host->use_dma) {
			/* Use DMA if enabled.
			 * Data transfer direction is defined later by
			 * relying on data flags in
			 * jz4740_mmc_prepare_dma_data() and
			 * jz4740_mmc_start_dma_transfer().
			 */
			timeout = jz4740_mmc_start_dma_transfer(host, data);
			data->bytes_xfered = data->blocks * data->blksz;
		} else if (data->flags & MMC_DATA_READ)
			/* Use PIO if DMA is not enabled.
			 * Data transfer direction was defined before
			 * by relying on data flags in
			 * jz_mmc_prepare_data_transfer().
			 */
			timeout = jz4740_mmc_read_data(host, data);
		else
			timeout = jz4740_mmc_write_data(host, data);

		if (unlikely(timeout)) {
			/* resume here on the next interrupt */
			host->state = JZ4740_MMC_STATE_TRANSFER_DATA;
			break;
		}

		jz4740_mmc_transfer_check_state(host, data);

		timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
		if (unlikely(timeout)) {
			host->state = JZ4740_MMC_STATE_SEND_STOP;
			break;
		}
		jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
		fallthrough;

	case JZ4740_MMC_STATE_SEND_STOP:
		if (!req->stop)
			break;

		jz4740_mmc_send_command(host, req->stop);

		if (mmc_resp_type(req->stop) & MMC_RSP_BUSY) {
			timeout = jz4740_mmc_poll_irq(host,
						      JZ_MMC_IRQ_PRG_DONE);
			if (timeout) {
				host->state = JZ4740_MMC_STATE_DONE;
				break;
			}
		}
		/* fall through */
	case JZ4740_MMC_STATE_DONE:
		break;
	}

	if (!timeout)
		jz4740_mmc_request_done(host);

	return IRQ_HANDLED;
}

/* Hard IRQ handler: acks/dispatches interrupts, records command errors
 * from STATUS, and wakes the threaded handler for request progress.
 */
static irqreturn_t jz_mmc_irq(int irq, void *devid)
{
	struct jz4740_mmc_host *host = devid;
	struct mmc_command *cmd = host->cmd;
	uint32_t irq_reg, status, tmp;

	status = readl(host->base + JZ_REG_MMC_STATUS);
	irq_reg = jz4740_mmc_read_irq_reg(host);

	tmp = irq_reg;
	/* only consider interrupts that are currently unmasked */
	irq_reg &= ~host->irq_mask;

	tmp &= ~(JZ_MMC_IRQ_TXFIFO_WR_REQ | JZ_MMC_IRQ_RXFIFO_RD_REQ |
		JZ_MMC_IRQ_PRG_DONE | JZ_MMC_IRQ_DATA_TRAN_DONE);

	if (tmp != irq_reg)
		jz4740_mmc_write_irq_reg(host, tmp & ~irq_reg);

	if (irq_reg & JZ_MMC_IRQ_SDIO) {
		jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_SDIO);
		mmc_signal_sdio_irq(host->mmc);
		irq_reg &= ~JZ_MMC_IRQ_SDIO;
	}

	if (host->req && cmd && irq_reg) {
		if (test_and_clear_bit(0, &host->waiting)) {
			del_timer(&host->timeout_timer);

			if (status & JZ_MMC_STATUS_TIMEOUT_RES) {
				cmd->error = -ETIMEDOUT;
			} else if (status & JZ_MMC_STATUS_CRC_RES_ERR) {
				cmd->error = -EIO;
			} else if (status & (JZ_MMC_STATUS_CRC_READ_ERROR |
				    JZ_MMC_STATUS_CRC_WRITE_ERROR)) {
				if (cmd->data)
					cmd->data->error = -EIO;
				cmd->error = -EIO;
			}

			jz4740_mmc_set_irq_enabled(host, irq_reg, false);
			jz4740_mmc_write_irq_reg(host, irq_reg);

			return IRQ_WAKE_THREAD;
		}
	}

	return IRQ_HANDLED;
}

/* Program CLKRT with a power-of-two divider of the source clock, and
 * enable the high-speed sampling/delay tweaks on SoCs that have them.
 * Returns the actual rate achieved.
 */
static int jz4740_mmc_set_clock_rate(struct jz4740_mmc_host *host, int rate)
{
	int div = 0;
	int real_rate;

	jz4740_mmc_clock_disable(host);
	clk_set_rate(host->clk, host->mmc->f_max);

	real_rate = clk_get_rate(host->clk);

	while (real_rate > rate && div < 7) {
		++div;
		real_rate >>= 1;
	}

	writew(div, host->base + JZ_REG_MMC_CLKRT);

	if (real_rate > 25000000) {
		if (host->version >= JZ_MMC_X1000) {
			writel(JZ_MMC_LPM_DRV_RISING_QTR_PHASE_DLY |
				   JZ_MMC_LPM_SMP_RISING_QTR_OR_HALF_PHASE_DLY |
				   JZ_MMC_LPM_LOW_POWER_MODE_EN,
				   host->base + JZ_REG_MMC_LPM);
		} else if (host->version >= JZ_MMC_JZ4760) {
			writel(JZ_MMC_LPM_DRV_RISING |
				   JZ_MMC_LPM_LOW_POWER_MODE_EN,
				   host->base + JZ_REG_MMC_LPM);
		} else if (host->version >= JZ_MMC_JZ4725B)
			writel(JZ_MMC_LPM_LOW_POWER_MODE_EN,
				   host->base + JZ_REG_MMC_LPM);
	}

	return real_rate;
}

/* mmc_host_ops.request: arm the software timeout and issue the command;
 * completion is driven from the IRQ handlers.
 */
static void jz4740_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);

	host->req = req;

	jz4740_mmc_write_irq_reg(host, ~0);	/* ack any stale interrupts */
	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, true);

	host->state = JZ4740_MMC_STATE_READ_RESPONSE;
	set_bit(0, &host->waiting);
	mod_timer(&host->timeout_timer,
		  jiffies + msecs_to_jiffies(JZ_MMC_REQ_TIMEOUT_MS));
	jz4740_mmc_send_command(host, req->cmd);
}

/* mmc_host_ops.set_ios: apply clock rate, power state and bus width. */
static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);
	if (ios->clock)
		jz4740_mmc_set_clock_rate(host, ios->clock);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		jz4740_mmc_reset(host);
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
		host->cmdat |= JZ_MMC_CMDAT_INIT;
		clk_prepare_enable(host->clk);
		break;
	case MMC_POWER_ON:
		break;
	default:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		clk_disable_unprepare(host->clk);
		break;
	}

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
		break;
	case MMC_BUS_WIDTH_4:
		host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
		host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
		host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_8BIT;
		break;
	default:
		break;
	}
}

static void jz4740_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);
	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_SDIO, enable);
}

static const struct mmc_host_ops jz4740_mmc_ops = {
	.request	= jz4740_mmc_request,
	.pre_req	= jz4740_mmc_pre_request,
	.post_req	= jz4740_mmc_post_request,
	.set_ios	= jz4740_mmc_set_ios,
	.get_ro		= mmc_gpio_get_ro,
	.get_cd		= mmc_gpio_get_cd,
	.enable_sdio_irq = jz4740_mmc_enable_sdio_irq,
};

static const struct of_device_id jz4740_mmc_of_match[] = {
	{ .compatible = "ingenic,jz4740-mmc", .data = (void *) JZ_MMC_JZ4740 },
	{ .compatible = "ingenic,jz4725b-mmc", .data = (void *)JZ_MMC_JZ4725B },
	{ .compatible = "ingenic,jz4760-mmc", .data = (void *) JZ_MMC_JZ4760 },
	{ .compatible = "ingenic,jz4780-mmc", .data = (void *) JZ_MMC_JZ4780 },
	{ .compatible = "ingenic,x1000-mmc", .data = (void *) JZ_MMC_X1000 },
	{},
};
MODULE_DEVICE_TABLE(of, jz4740_mmc_of_match);

static int jz4740_mmc_probe(struct platform_device* pdev)
{
	int ret;
	struct mmc_host *mmc;
	struct jz4740_mmc_host *host;
	const struct of_device_id *match;

	mmc = mmc_alloc_host(sizeof(struct jz4740_mmc_host), &pdev->dev);
	if (!mmc) {
		dev_err(&pdev->dev, "Failed to alloc mmc host structure\n");
		return -ENOMEM;
	}

	host = mmc_priv(mmc);

	match = of_match_device(jz4740_mmc_of_match, &pdev->dev);
	if (match) {
		host->version = (enum jz4740_mmc_version)match->data;
	} else {
		/* JZ4740 should be the only one using legacy probe */
		host->version = JZ_MMC_JZ4740;
	}

	ret = mmc_of_parse(mmc);
	if (ret) {
		dev_err_probe(&pdev->dev, ret, "could not parse device properties\n");
		goto err_free_host;
	}

	mmc_regulator_get_supply(mmc);

	host->irq = platform_get_irq(pdev, 0);
	if (host->irq < 0) {
		ret = host->irq;
		goto err_free_host;
	}

	host->clk = devm_clk_get(&pdev->dev, "mmc");
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		dev_err(&pdev->dev, "Failed to get mmc clock\n");
		goto err_free_host;
	}

	host->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->base = devm_ioremap_resource(&pdev->dev, host->mem_res);
	if (IS_ERR(host->base)) {
		ret = PTR_ERR(host->base);
		dev_err(&pdev->dev, "Failed to ioremap base memory\n");
		goto err_free_host;
	}

	mmc->ops = &jz4740_mmc_ops;
	if (!mmc->f_max)
		mmc->f_max = JZ_MMC_CLK_RATE;
	mmc->f_min = mmc->f_max / 128;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/*
	 * We use a fixed timeout of 5s, hence inform the core about it. A
	 * future improvement should instead respect the cmd->busy_timeout.
	 */
	mmc->max_busy_timeout = JZ_MMC_REQ_TIMEOUT_MS;

	mmc->max_blk_size = (1 << 10) - 1;
	mmc->max_blk_count = (1 << 15) - 1;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;

	mmc->max_segs = 128;
	mmc->max_seg_size = mmc->max_req_size;

	host->mmc = mmc;
	host->pdev = pdev;
	spin_lock_init(&host->lock);
	host->irq_mask = ~0;	/* all interrupts masked until requested */

	jz4740_mmc_reset(host);

	ret = request_threaded_irq(host->irq, jz_mmc_irq, jz_mmc_irq_worker, 0,
			dev_name(&pdev->dev), host);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request irq: %d\n", ret);
		goto err_free_host;
	}

	jz4740_mmc_clock_disable(host);
	timer_setup(&host->timeout_timer, jz4740_mmc_timeout, 0);

	/* DMA is optional: fall back to PIO unless probing must be deferred. */
	ret = jz4740_mmc_acquire_dma_channels(host);
	if (ret == -EPROBE_DEFER)
		goto err_free_irq;
	host->use_dma = !ret;

	platform_set_drvdata(pdev, host);
	ret = mmc_add_host(mmc);

	if (ret) {
		dev_err(&pdev->dev, "Failed to add mmc host: %d\n", ret);
		goto err_release_dma;
	}
	dev_info(&pdev->dev, "Ingenic SD/MMC card driver registered\n");

	dev_info(&pdev->dev, "Using %s, %d-bit mode\n",
		 host->use_dma ? "DMA" : "PIO",
		 (mmc->caps & MMC_CAP_8_BIT_DATA) ? 8 :
		 ((mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1));

	return 0;

err_release_dma:
	if (host->use_dma)
		jz4740_mmc_release_dma_channels(host);
err_free_irq:
	free_irq(host->irq, host);
err_free_host:
	mmc_free_host(mmc);

	return ret;
}

static int jz4740_mmc_remove(struct platform_device *pdev)
{
	struct jz4740_mmc_host *host = platform_get_drvdata(pdev);

	del_timer_sync(&host->timeout_timer);
	jz4740_mmc_set_irq_enabled(host, 0xff, false);
	jz4740_mmc_reset(host);

	mmc_remove_host(host->mmc);

	free_irq(host->irq, host);

	if (host->use_dma)
		jz4740_mmc_release_dma_channels(host);

	mmc_free_host(host->mmc);

	return 0;
}

static int __maybe_unused jz4740_mmc_suspend(struct device *dev)
{
	return pinctrl_pm_select_sleep_state(dev);
}

static int __maybe_unused jz4740_mmc_resume(struct device *dev)
{
	return pinctrl_select_default_state(dev);
}

static SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend,
	jz4740_mmc_resume);

static struct platform_driver jz4740_mmc_driver = {
	.probe = jz4740_mmc_probe,
	.remove = jz4740_mmc_remove,
	.driver = {
		.name = "jz4740-mmc",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = of_match_ptr(jz4740_mmc_of_match),
		.pm = pm_ptr(&jz4740_mmc_pm_ops),
	},
};

module_platform_driver(jz4740_mmc_driver);

MODULE_DESCRIPTION("JZ4740 SD/MMC controller driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");