Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mmc: cavium: Add scatter-gather DMA support

Add Support for the scatter-gather DMA available in the
ThunderX MMC units. Up to 16 DMA requests can be processed
together.

Signed-off-by: Jan Glauber <jglauber@cavium.com>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>

Authored by Jan Glauber; committed by Ulf Hansson.
cd76e5c5 166bac38

+127 -10
+4 -1
drivers/mmc/host/cavium-thunderx.c
··· 82 82 host->dma_base = host->base; 83 83 84 84 host->reg_off = 0x2000; 85 - host->reg_off_dma = 0x180; 85 + host->reg_off_dma = 0x160; 86 86 87 87 host->clk = devm_clk_get(dev, NULL); 88 88 if (IS_ERR(host->clk)) ··· 101 101 host->release_bus = thunder_mmc_release_bus; 102 102 host->int_enable = thunder_mmc_int_enable; 103 103 104 + host->use_sg = true; 104 105 host->big_dma_addr = true; 105 106 host->need_irq_handler_lock = true; 106 107 host->last_slot = -1; ··· 116 115 */ 117 116 writeq(127, host->base + MIO_EMM_INT_EN(host)); 118 117 writeq(3, host->base + MIO_EMM_DMA_INT_ENA_W1C(host)); 118 + /* Clear DMA FIFO */ 119 + writeq(BIT_ULL(16), host->base + MIO_EMM_DMA_FIFO_CFG(host)); 119 120 120 121 ret = thunder_mmc_register_interrupts(host, pdev); 121 122 if (ret)
+101 -3
drivers/mmc/host/cavium.c
··· 377 377 return 1; 378 378 } 379 379 380 + static int finish_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data) 381 + { 382 + u64 fifo_cfg; 383 + int count; 384 + 385 + /* Check if there are any pending requests left */ 386 + fifo_cfg = readq(host->dma_base + MIO_EMM_DMA_FIFO_CFG(host)); 387 + count = FIELD_GET(MIO_EMM_DMA_FIFO_CFG_COUNT, fifo_cfg); 388 + if (count) 389 + dev_err(host->dev, "%u requests still pending\n", count); 390 + 391 + data->bytes_xfered = data->blocks * data->blksz; 392 + data->error = 0; 393 + 394 + /* Clear and disable FIFO */ 395 + writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host)); 396 + dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data)); 397 + return 1; 398 + } 399 + 380 400 static int finish_dma(struct cvm_mmc_host *host, struct mmc_data *data) 381 401 { 382 - return finish_dma_single(host, data); 402 + if (host->use_sg && data->sg_len > 1) 403 + return finish_dma_sg(host, data); 404 + else 405 + return finish_dma_single(host, data); 383 406 } 384 407 385 408 static int check_status(u64 rsp_sts) ··· 545 522 return addr; 546 523 } 547 524 525 + /* 526 + * Queue complete sg list into the FIFO. 527 + * Returns 0 on error, 1 otherwise. 
528 + */ 529 + static u64 prepare_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data) 530 + { 531 + struct scatterlist *sg; 532 + u64 fifo_cmd, addr; 533 + int count, i, rw; 534 + 535 + count = dma_map_sg(host->dev, data->sg, data->sg_len, 536 + get_dma_dir(data)); 537 + if (!count) 538 + return 0; 539 + if (count > 16) 540 + goto error; 541 + 542 + /* Enable FIFO by removing CLR bit */ 543 + writeq(0, host->dma_base + MIO_EMM_DMA_FIFO_CFG(host)); 544 + 545 + for_each_sg(data->sg, sg, count, i) { 546 + /* Program DMA address */ 547 + addr = sg_dma_address(sg); 548 + if (addr & 7) 549 + goto error; 550 + writeq(addr, host->dma_base + MIO_EMM_DMA_FIFO_ADR(host)); 551 + 552 + /* 553 + * If we have scatter-gather support we also have an extra 554 + * register for the DMA addr, so no need to check 555 + * host->big_dma_addr here. 556 + */ 557 + rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0; 558 + fifo_cmd = FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_RW, rw); 559 + 560 + /* enable interrupts on the last element */ 561 + fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_INTDIS, 562 + (i + 1 == count) ? 0 : 1); 563 + 564 + #ifdef __LITTLE_ENDIAN 565 + fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_ENDIAN, 1); 566 + #endif 567 + fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_SIZE, 568 + sg_dma_len(sg) / 8 - 1); 569 + /* 570 + * The write copies the address and the command to the FIFO 571 + * and increments the FIFO's COUNT field. 572 + */ 573 + writeq(fifo_cmd, host->dma_base + MIO_EMM_DMA_FIFO_CMD(host)); 574 + pr_debug("[%s] sg_dma_len: %u sg_elem: %d/%d\n", 575 + (rw) ? "W" : "R", sg_dma_len(sg), i, count); 576 + } 577 + 578 + /* 579 + * In difference to prepare_dma_single we don't return the 580 + * address here, as it would not make sense for scatter-gather. 581 + * The dma fixup is only required on models that don't support 582 + * scatter-gather, so that is not a problem. 
583 + */ 584 + return 1; 585 + 586 + error: 587 + WARN_ON_ONCE(1); 588 + dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data)); 589 + /* Disable FIFO */ 590 + writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host)); 591 + return 0; 592 + } 593 + 548 594 static u64 prepare_dma(struct cvm_mmc_host *host, struct mmc_data *data) 549 595 { 550 - return prepare_dma_single(host, data); 596 + if (host->use_sg && data->sg_len > 1) 597 + return prepare_dma_sg(host, data); 598 + else 599 + return prepare_dma_single(host, data); 551 600 } 552 601 553 602 static u64 prepare_ext_dma(struct mmc_host *mmc, struct mmc_request *mrq) ··· 1035 940 mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | 1036 941 MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD; 1037 942 1038 - mmc->max_segs = 1; 943 + if (host->use_sg) 944 + mmc->max_segs = 16; 945 + else 946 + mmc->max_segs = 1; 1039 947 1040 948 /* DMA size field can address up to 8 MB */ 1041 949 mmc->max_seg_size = 8 * 1024 * 1024;
+22 -6
drivers/mmc/host/cavium.h
··· 23 23 #define CAVIUM_MAX_MMC 4 24 24 25 25 /* DMA register addresses */ 26 - #define MIO_EMM_DMA_CFG(x) (0x00 + x->reg_off_dma) 27 - #define MIO_EMM_DMA_ADR(x) (0x08 + x->reg_off_dma) 28 - #define MIO_EMM_DMA_INT(x) (0x10 + x->reg_off_dma) 29 - #define MIO_EMM_DMA_INT_W1S(x) (0x18 + x->reg_off_dma) 30 - #define MIO_EMM_DMA_INT_ENA_W1S(x) (0x20 + x->reg_off_dma) 31 - #define MIO_EMM_DMA_INT_ENA_W1C(x) (0x28 + x->reg_off_dma) 26 + #define MIO_EMM_DMA_FIFO_CFG(x) (0x00 + x->reg_off_dma) 27 + #define MIO_EMM_DMA_FIFO_ADR(x) (0x10 + x->reg_off_dma) 28 + #define MIO_EMM_DMA_FIFO_CMD(x) (0x18 + x->reg_off_dma) 29 + #define MIO_EMM_DMA_CFG(x) (0x20 + x->reg_off_dma) 30 + #define MIO_EMM_DMA_ADR(x) (0x28 + x->reg_off_dma) 31 + #define MIO_EMM_DMA_INT(x) (0x30 + x->reg_off_dma) 32 + #define MIO_EMM_DMA_INT_W1S(x) (0x38 + x->reg_off_dma) 33 + #define MIO_EMM_DMA_INT_ENA_W1S(x) (0x40 + x->reg_off_dma) 34 + #define MIO_EMM_DMA_INT_ENA_W1C(x) (0x48 + x->reg_off_dma) 32 35 33 36 /* register addresses */ 34 37 #define MIO_EMM_CFG(x) (0x00 + x->reg_off) ··· 67 64 struct mmc_request *current_req; 68 65 struct sg_mapping_iter smi; 69 66 bool dma_active; 67 + bool use_sg; 70 68 71 69 bool has_ciu3; 72 70 bool big_dma_addr; ··· 117 113 }; 118 114 119 115 /* Bitfield definitions */ 116 + #define MIO_EMM_DMA_FIFO_CFG_CLR BIT_ULL(16) 117 + #define MIO_EMM_DMA_FIFO_CFG_INT_LVL GENMASK_ULL(12, 8) 118 + #define MIO_EMM_DMA_FIFO_CFG_COUNT GENMASK_ULL(4, 0) 119 + 120 + #define MIO_EMM_DMA_FIFO_CMD_RW BIT_ULL(62) 121 + #define MIO_EMM_DMA_FIFO_CMD_INTDIS BIT_ULL(60) 122 + #define MIO_EMM_DMA_FIFO_CMD_SWAP32 BIT_ULL(59) 123 + #define MIO_EMM_DMA_FIFO_CMD_SWAP16 BIT_ULL(58) 124 + #define MIO_EMM_DMA_FIFO_CMD_SWAP8 BIT_ULL(57) 125 + #define MIO_EMM_DMA_FIFO_CMD_ENDIAN BIT_ULL(56) 126 + #define MIO_EMM_DMA_FIFO_CMD_SIZE GENMASK_ULL(55, 36) 127 + 120 128 #define MIO_EMM_CMD_SKIP_BUSY BIT_ULL(62) 121 129 #define MIO_EMM_CMD_BUS_ID GENMASK_ULL(61, 60) 122 130 #define MIO_EMM_CMD_VAL BIT_ULL(59)