Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mmc: rtsx: add support for pre_req and post_req

Add support for non-blocking requests: pre_req() runs dma_map_sg() and
post_req() runs dma_unmap_sg(). This patch can increase card read/write
speed, especially for high-speed cards with a slow CPU (for some embedded
platforms).

Users can get a great benefit from this patch. If the CPU frequency is 800MHz,
SDR104 or DDR50 card read/write speed may increase by more than 15%.

test results:
intel i3(800MHz - 2.3GHz), SD card clock 208MHz

performance mode(2.3GHz):
Before:
dd if=/dev/mmcblk0p1 of=/dev/null bs=64k count=1024
67108864 bytes (67 MB) copied, 1.18191 s, 56.8 MB/s
After:
dd if=/dev/mmcblk0p1 of=/dev/null bs=64k count=1024
67108864 bytes (67 MB) copied, 1.09276 s, 61.4 MB/s

powersave mode(800MHz):
Before:
dd if=/dev/mmcblk0p1 of=/dev/null bs=64k count=1024
67108864 bytes (67 MB) copied, 1.29569 s, 51.8 MB/s
After:
dd if=/dev/mmcblk0p1 of=/dev/null bs=64k count=1024
67108864 bytes (67 MB) copied, 1.11218 s, 60.3 MB/s

Signed-off-by: Micky Ching <micky_ching@realsil.com.cn>
Signed-off-by: Chris Ball <chris@printf.net>

authored by

Micky Ching and committed by
Chris Ball
c42deffd abcc6b29

+447 -108
+91 -41
drivers/mfd/rtsx_pcr.c
··· 338 338 int num_sg, bool read, int timeout) 339 339 { 340 340 struct completion trans_done; 341 - u8 dir; 342 - int err = 0, i, count; 341 + int err = 0, count; 343 342 long timeleft; 344 343 unsigned long flags; 345 - struct scatterlist *sg; 346 - enum dma_data_direction dma_dir; 347 - u32 val; 348 - dma_addr_t addr; 349 - unsigned int len; 350 344 351 - dev_dbg(&(pcr->pci->dev), "--> %s: num_sg = %d\n", __func__, num_sg); 352 - 353 - /* don't transfer data during abort processing */ 354 - if (pcr->remove_pci) 355 - return -EINVAL; 356 - 357 - if ((sglist == NULL) || (num_sg <= 0)) 358 - return -EINVAL; 359 - 360 - if (read) { 361 - dir = DEVICE_TO_HOST; 362 - dma_dir = DMA_FROM_DEVICE; 363 - } else { 364 - dir = HOST_TO_DEVICE; 365 - dma_dir = DMA_TO_DEVICE; 366 - } 367 - 368 - count = dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dma_dir); 345 + count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read); 369 346 if (count < 1) { 370 347 dev_err(&(pcr->pci->dev), "scatterlist map failed\n"); 371 348 return -EINVAL; 372 349 } 373 350 dev_dbg(&(pcr->pci->dev), "DMA mapping count: %d\n", count); 374 351 375 - val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE; 376 - pcr->sgi = 0; 377 - for_each_sg(sglist, sg, count, i) { 378 - addr = sg_dma_address(sg); 379 - len = sg_dma_len(sg); 380 - rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1); 381 - } 382 352 383 353 spin_lock_irqsave(&pcr->lock, flags); 384 354 385 355 pcr->done = &trans_done; 386 356 pcr->trans_result = TRANS_NOT_READY; 387 357 init_completion(&trans_done); 388 - rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr); 389 - rtsx_pci_writel(pcr, RTSX_HDBCTLR, val); 390 358 391 359 spin_unlock_irqrestore(&pcr->lock, flags); 360 + 361 + rtsx_pci_dma_transfer(pcr, sglist, count, read); 392 362 393 363 timeleft = wait_for_completion_interruptible_timeout( 394 364 &trans_done, msecs_to_jiffies(timeout)); ··· 383 413 pcr->done = NULL; 384 414 spin_unlock_irqrestore(&pcr->lock, flags); 385 415 386 - 
dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dma_dir); 416 + rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read); 387 417 388 418 if ((err < 0) && (err != -ENODEV)) 389 419 rtsx_pci_stop_cmd(pcr); ··· 394 424 return err; 395 425 } 396 426 EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data); 427 + 428 + int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist, 429 + int num_sg, bool read) 430 + { 431 + enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE; 432 + 433 + if (pcr->remove_pci) 434 + return -EINVAL; 435 + 436 + if ((sglist == NULL) || num_sg < 1) 437 + return -EINVAL; 438 + 439 + return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir); 440 + } 441 + EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg); 442 + 443 + int rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist, 444 + int num_sg, bool read) 445 + { 446 + enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE; 447 + 448 + if (pcr->remove_pci) 449 + return -EINVAL; 450 + 451 + if (sglist == NULL || num_sg < 1) 452 + return -EINVAL; 453 + 454 + dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir); 455 + return num_sg; 456 + } 457 + EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg); 458 + 459 + int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist, 460 + int sg_count, bool read) 461 + { 462 + struct scatterlist *sg; 463 + dma_addr_t addr; 464 + unsigned int len; 465 + int i; 466 + u32 val; 467 + u8 dir = read ? 
DEVICE_TO_HOST : HOST_TO_DEVICE; 468 + unsigned long flags; 469 + 470 + if (pcr->remove_pci) 471 + return -EINVAL; 472 + 473 + if ((sglist == NULL) || (sg_count < 1)) 474 + return -EINVAL; 475 + 476 + val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE; 477 + pcr->sgi = 0; 478 + for_each_sg(sglist, sg, sg_count, i) { 479 + addr = sg_dma_address(sg); 480 + len = sg_dma_len(sg); 481 + rtsx_pci_add_sg_tbl(pcr, addr, len, i == sg_count - 1); 482 + } 483 + 484 + spin_lock_irqsave(&pcr->lock, flags); 485 + 486 + rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr); 487 + rtsx_pci_writel(pcr, RTSX_HDBCTLR, val); 488 + 489 + spin_unlock_irqrestore(&pcr->lock, flags); 490 + 491 + return 0; 492 + } 493 + EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer); 397 494 398 495 int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len) 399 496 { ··· 873 836 int_reg = rtsx_pci_readl(pcr, RTSX_BIPR); 874 837 /* Clear interrupt flag */ 875 838 rtsx_pci_writel(pcr, RTSX_BIPR, int_reg); 839 + dev_dbg(&pcr->pci->dev, "=========== BIPR 0x%8x ==========\n", int_reg); 840 + 876 841 if ((int_reg & pcr->bier) == 0) { 877 842 spin_unlock(&pcr->lock); 878 843 return IRQ_NONE; ··· 905 866 } 906 867 907 868 if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) { 908 - if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) { 869 + if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) 909 870 pcr->trans_result = TRANS_RESULT_FAIL; 910 - if (pcr->done) 911 - complete(pcr->done); 912 - } else if (int_reg & TRANS_OK_INT) { 871 + else if (int_reg & TRANS_OK_INT) 913 872 pcr->trans_result = TRANS_RESULT_OK; 914 - if (pcr->done) 915 - complete(pcr->done); 873 + 874 + if (pcr->done) 875 + complete(pcr->done); 876 + 877 + if (int_reg & SD_EXIST) { 878 + struct rtsx_slot *slot = &pcr->slots[RTSX_SD_CARD]; 879 + if (slot && slot->done_transfer) 880 + slot->done_transfer(slot->p_dev); 881 + } 882 + 883 + if (int_reg & MS_EXIST) { 884 + struct rtsx_slot *slot = &pcr->slots[RTSX_SD_CARD]; 885 + if (slot && slot->done_transfer) 886 
+ slot->done_transfer(slot->p_dev); 916 887 } 917 888 } 889 + 918 890 919 891 if (pcr->card_inserted || pcr->card_removed) 920 892 schedule_delayed_work(&pcr->carddet_work,
+349 -67
drivers/mmc/host/rtsx_pci_sdmmc.c
··· 31 31 #include <linux/mfd/rtsx_pci.h> 32 32 #include <asm/unaligned.h> 33 33 34 + struct realtek_next { 35 + unsigned int sg_count; 36 + s32 cookie; 37 + }; 38 + 34 39 struct realtek_pci_sdmmc { 35 40 struct platform_device *pdev; 36 41 struct rtsx_pcr *pcr; 37 42 struct mmc_host *mmc; 38 43 struct mmc_request *mrq; 44 + struct mmc_command *cmd; 45 + struct mmc_data *data; 39 46 40 - struct mutex host_mutex; 47 + spinlock_t lock; 48 + struct timer_list timer; 49 + struct tasklet_struct cmd_tasklet; 50 + struct tasklet_struct data_tasklet; 51 + struct tasklet_struct finish_tasklet; 41 52 53 + u8 rsp_type; 54 + u8 rsp_len; 55 + int sg_count; 42 56 u8 ssc_depth; 43 57 unsigned int clock; 44 58 bool vpclk; ··· 62 48 int power_state; 63 49 #define SDMMC_POWER_ON 1 64 50 #define SDMMC_POWER_OFF 0 51 + 52 + struct realtek_next next_data; 65 53 }; 54 + 55 + static int sd_start_multi_rw(struct realtek_pci_sdmmc *host, 56 + struct mmc_request *mrq); 66 57 67 58 static inline struct device *sdmmc_dev(struct realtek_pci_sdmmc *host) 68 59 { ··· 104 85 #else 105 86 #define sd_print_debug_regs(host) 106 87 #endif /* DEBUG */ 88 + 89 + static void sd_isr_done_transfer(struct platform_device *pdev) 90 + { 91 + struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev); 92 + 93 + spin_lock(&host->lock); 94 + if (host->cmd) 95 + tasklet_schedule(&host->cmd_tasklet); 96 + if (host->data) 97 + tasklet_schedule(&host->data_tasklet); 98 + spin_unlock(&host->lock); 99 + } 100 + 101 + static void sd_request_timeout(unsigned long host_addr) 102 + { 103 + struct realtek_pci_sdmmc *host = (struct realtek_pci_sdmmc *)host_addr; 104 + unsigned long flags; 105 + 106 + spin_lock_irqsave(&host->lock, flags); 107 + 108 + if (!host->mrq) { 109 + dev_err(sdmmc_dev(host), "error: no request exist\n"); 110 + goto out; 111 + } 112 + 113 + if (host->cmd) 114 + host->cmd->error = -ETIMEDOUT; 115 + if (host->data) 116 + host->data->error = -ETIMEDOUT; 117 + 118 + dev_dbg(sdmmc_dev(host), "timeout for 
request\n"); 119 + 120 + out: 121 + tasklet_schedule(&host->finish_tasklet); 122 + spin_unlock_irqrestore(&host->lock, flags); 123 + } 124 + 125 + static void sd_finish_request(unsigned long host_addr) 126 + { 127 + struct realtek_pci_sdmmc *host = (struct realtek_pci_sdmmc *)host_addr; 128 + struct rtsx_pcr *pcr = host->pcr; 129 + struct mmc_request *mrq; 130 + struct mmc_command *cmd; 131 + struct mmc_data *data; 132 + unsigned long flags; 133 + bool any_error; 134 + 135 + spin_lock_irqsave(&host->lock, flags); 136 + 137 + del_timer(&host->timer); 138 + mrq = host->mrq; 139 + if (!mrq) { 140 + dev_err(sdmmc_dev(host), "error: no request need finish\n"); 141 + goto out; 142 + } 143 + 144 + cmd = mrq->cmd; 145 + data = mrq->data; 146 + 147 + any_error = (mrq->sbc && mrq->sbc->error) || 148 + (mrq->stop && mrq->stop->error) || 149 + (cmd && cmd->error) || (data && data->error); 150 + 151 + if (any_error) { 152 + rtsx_pci_stop_cmd(pcr); 153 + sd_clear_error(host); 154 + } 155 + 156 + if (data) { 157 + if (any_error) 158 + data->bytes_xfered = 0; 159 + else 160 + data->bytes_xfered = data->blocks * data->blksz; 161 + 162 + if (!data->host_cookie) 163 + rtsx_pci_dma_unmap_sg(pcr, data->sg, data->sg_len, 164 + data->flags & MMC_DATA_READ); 165 + 166 + } 167 + 168 + host->mrq = NULL; 169 + host->cmd = NULL; 170 + host->data = NULL; 171 + 172 + out: 173 + spin_unlock_irqrestore(&host->lock, flags); 174 + mutex_unlock(&pcr->pcr_mutex); 175 + mmc_request_done(host->mmc, mrq); 176 + } 107 177 108 178 static int sd_read_data(struct realtek_pci_sdmmc *host, u8 *cmd, u16 byte_cnt, 109 179 u8 *buf, int buf_len, int timeout) ··· 311 203 return 0; 312 204 } 313 205 314 - static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host, 315 - struct mmc_command *cmd) 206 + static void sd_send_cmd(struct realtek_pci_sdmmc *host, struct mmc_command *cmd) 316 207 { 317 208 struct rtsx_pcr *pcr = host->pcr; 318 209 u8 cmd_idx = (u8)cmd->opcode; ··· 319 212 int err = 0; 320 213 int 
timeout = 100; 321 214 int i; 322 - u8 *ptr; 323 - int stat_idx = 0; 324 215 u8 rsp_type; 325 216 int rsp_len = 5; 326 - bool clock_toggled = false; 217 + unsigned long flags; 218 + 219 + if (host->cmd) 220 + dev_err(sdmmc_dev(host), "error: cmd already exist\n"); 221 + 222 + host->cmd = cmd; 327 223 328 224 dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n", 329 225 __func__, cmd_idx, arg); ··· 361 251 err = -EINVAL; 362 252 goto out; 363 253 } 254 + host->rsp_type = rsp_type; 255 + host->rsp_len = rsp_len; 364 256 365 257 if (rsp_type == SD_RSP_TYPE_R1b) 366 258 timeout = 3000; ··· 372 260 0xFF, SD_CLK_TOGGLE_EN); 373 261 if (err < 0) 374 262 goto out; 375 - 376 - clock_toggled = true; 377 263 } 378 264 379 265 rtsx_pci_init_cmd(pcr); ··· 395 285 /* Read data from ping-pong buffer */ 396 286 for (i = PPBUF_BASE2; i < PPBUF_BASE2 + 16; i++) 397 287 rtsx_pci_add_cmd(pcr, READ_REG_CMD, (u16)i, 0, 0); 398 - stat_idx = 16; 399 288 } else if (rsp_type != SD_RSP_TYPE_R0) { 400 289 /* Read data from SD_CMDx registers */ 401 290 for (i = SD_CMD0; i <= SD_CMD4; i++) 402 291 rtsx_pci_add_cmd(pcr, READ_REG_CMD, (u16)i, 0, 0); 403 - stat_idx = 5; 404 292 } 405 293 406 294 rtsx_pci_add_cmd(pcr, READ_REG_CMD, SD_STAT1, 0, 0); 407 295 408 - err = rtsx_pci_send_cmd(pcr, timeout); 409 - if (err < 0) { 410 - sd_print_debug_regs(host); 411 - sd_clear_error(host); 412 - dev_dbg(sdmmc_dev(host), 413 - "rtsx_pci_send_cmd error (err = %d)\n", err); 296 + mod_timer(&host->timer, jiffies + msecs_to_jiffies(timeout)); 297 + 298 + spin_lock_irqsave(&pcr->lock, flags); 299 + pcr->trans_result = TRANS_NOT_READY; 300 + rtsx_pci_send_cmd_no_wait(pcr); 301 + spin_unlock_irqrestore(&pcr->lock, flags); 302 + 303 + return; 304 + 305 + out: 306 + cmd->error = err; 307 + tasklet_schedule(&host->finish_tasklet); 308 + } 309 + 310 + static void sd_get_rsp(unsigned long host_addr) 311 + { 312 + struct realtek_pci_sdmmc *host = (struct realtek_pci_sdmmc *)host_addr; 313 + struct rtsx_pcr *pcr = 
host->pcr; 314 + struct mmc_command *cmd; 315 + int i, err = 0, stat_idx; 316 + u8 *ptr, rsp_type; 317 + unsigned long flags; 318 + 319 + spin_lock_irqsave(&host->lock, flags); 320 + 321 + cmd = host->cmd; 322 + host->cmd = NULL; 323 + 324 + if (!cmd) { 325 + dev_err(sdmmc_dev(host), "error: cmd not exist\n"); 414 326 goto out; 415 327 } 328 + 329 + spin_lock(&pcr->lock); 330 + if (pcr->trans_result == TRANS_NO_DEVICE) 331 + err = -ENODEV; 332 + else if (pcr->trans_result != TRANS_RESULT_OK) 333 + err = -EINVAL; 334 + spin_unlock(&pcr->lock); 335 + 336 + if (err < 0) 337 + goto out; 338 + 339 + rsp_type = host->rsp_type; 340 + stat_idx = host->rsp_len; 416 341 417 342 if (rsp_type == SD_RSP_TYPE_R0) { 418 343 err = 0; ··· 485 340 cmd->resp[0]); 486 341 } 487 342 343 + if (cmd == host->mrq->sbc) { 344 + sd_send_cmd(host, host->mrq->cmd); 345 + spin_unlock_irqrestore(&host->lock, flags); 346 + return; 347 + } 348 + 349 + if (cmd == host->mrq->stop) 350 + goto out; 351 + 352 + if (cmd->data) { 353 + sd_start_multi_rw(host, host->mrq); 354 + spin_unlock_irqrestore(&host->lock, flags); 355 + return; 356 + } 357 + 488 358 out: 489 359 cmd->error = err; 490 360 491 - if (err && clock_toggled) 492 - rtsx_pci_write_register(pcr, SD_BUS_STAT, 493 - SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0); 361 + tasklet_schedule(&host->finish_tasklet); 362 + spin_unlock_irqrestore(&host->lock, flags); 494 363 } 495 364 496 - static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq) 365 + static int sd_pre_dma_transfer(struct realtek_pci_sdmmc *host, 366 + struct mmc_data *data, struct realtek_next *next) 367 + { 368 + struct rtsx_pcr *pcr = host->pcr; 369 + int read = data->flags & MMC_DATA_READ; 370 + int sg_count = 0; 371 + 372 + if (!next && data->host_cookie && 373 + data->host_cookie != host->next_data.cookie) { 374 + dev_err(sdmmc_dev(host), 375 + "error: invalid cookie data[%d] host[%d]\n", 376 + data->host_cookie, host->next_data.cookie); 377 + data->host_cookie 
= 0; 378 + } 379 + 380 + if (next || (!next && data->host_cookie != host->next_data.cookie)) 381 + sg_count = rtsx_pci_dma_map_sg(pcr, 382 + data->sg, data->sg_len, read); 383 + else 384 + sg_count = host->next_data.sg_count; 385 + 386 + if (next) { 387 + next->sg_count = sg_count; 388 + if (++next->cookie < 0) 389 + next->cookie = 1; 390 + data->host_cookie = next->cookie; 391 + } 392 + 393 + return sg_count; 394 + } 395 + 396 + static void sdmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq, 397 + bool is_first_req) 398 + { 399 + struct realtek_pci_sdmmc *host = mmc_priv(mmc); 400 + struct mmc_data *data = mrq->data; 401 + 402 + if (data->host_cookie) { 403 + dev_err(sdmmc_dev(host), 404 + "error: descard already cookie data[%d]\n", 405 + data->host_cookie); 406 + data->host_cookie = 0; 407 + } 408 + 409 + dev_dbg(sdmmc_dev(host), "dma sg prepared: %d\n", 410 + sd_pre_dma_transfer(host, data, &host->next_data)); 411 + } 412 + 413 + static void sdmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq, 414 + int err) 415 + { 416 + struct realtek_pci_sdmmc *host = mmc_priv(mmc); 417 + struct rtsx_pcr *pcr = host->pcr; 418 + struct mmc_data *data = mrq->data; 419 + int read = data->flags & MMC_DATA_READ; 420 + 421 + rtsx_pci_dma_unmap_sg(pcr, data->sg, data->sg_len, read); 422 + data->host_cookie = 0; 423 + } 424 + 425 + static int sd_start_multi_rw(struct realtek_pci_sdmmc *host, 426 + struct mmc_request *mrq) 497 427 { 498 428 struct rtsx_pcr *pcr = host->pcr; 499 429 struct mmc_host *mmc = host->mmc; 500 430 struct mmc_card *card = mmc->card; 501 431 struct mmc_data *data = mrq->data; 502 432 int uhs = mmc_card_uhs(card); 503 - int read = (data->flags & MMC_DATA_READ) ? 
1 : 0; 433 + int read = data->flags & MMC_DATA_READ; 504 434 u8 cfg2, trans_mode; 505 435 int err; 506 436 size_t data_len = data->blksz * data->blocks; 437 + 438 + if (host->data) 439 + dev_err(sdmmc_dev(host), "error: data already exist\n"); 440 + 441 + host->data = data; 507 442 508 443 if (read) { 509 444 cfg2 = SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | ··· 635 410 rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER, 636 411 SD_TRANSFER_END, SD_TRANSFER_END); 637 412 413 + mod_timer(&host->timer, jiffies + 10 * HZ); 638 414 rtsx_pci_send_cmd_no_wait(pcr); 639 415 640 - err = rtsx_pci_transfer_data(pcr, data->sg, data->sg_len, read, 10000); 416 + err = rtsx_pci_dma_transfer(pcr, data->sg, host->sg_count, read); 641 417 if (err < 0) { 642 - sd_clear_error(host); 643 - return err; 418 + data->error = err; 419 + tasklet_schedule(&host->finish_tasklet); 420 + } 421 + return 0; 422 + } 423 + 424 + static void sd_finish_multi_rw(unsigned long host_addr) 425 + { 426 + struct realtek_pci_sdmmc *host = (struct realtek_pci_sdmmc *)host_addr; 427 + struct rtsx_pcr *pcr = host->pcr; 428 + struct mmc_data *data; 429 + int err = 0; 430 + unsigned long flags; 431 + 432 + spin_lock_irqsave(&host->lock, flags); 433 + 434 + if (!host->data) { 435 + dev_err(sdmmc_dev(host), "error: no data exist\n"); 436 + goto out; 644 437 } 645 438 646 - return 0; 439 + data = host->data; 440 + host->data = NULL; 441 + 442 + if (pcr->trans_result == TRANS_NO_DEVICE) 443 + err = -ENODEV; 444 + else if (pcr->trans_result != TRANS_RESULT_OK) 445 + err = -EINVAL; 446 + 447 + if (err < 0) { 448 + data->error = err; 449 + goto out; 450 + } 451 + 452 + if (!host->mrq->sbc && data->stop) { 453 + sd_send_cmd(host, data->stop); 454 + spin_unlock_irqrestore(&host->lock, flags); 455 + return; 456 + } 457 + 458 + out: 459 + tasklet_schedule(&host->finish_tasklet); 460 + spin_unlock_irqrestore(&host->lock, flags); 647 461 } 648 462 649 463 static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host) ··· 
901 637 return 0; 902 638 } 903 639 640 + static inline bool sd_use_muti_rw(struct mmc_command *cmd) 641 + { 642 + return mmc_op_multi(cmd->opcode) || 643 + (cmd->opcode == MMC_READ_SINGLE_BLOCK) || 644 + (cmd->opcode == MMC_WRITE_BLOCK); 645 + } 646 + 904 647 static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq) 905 648 { 906 649 struct realtek_pci_sdmmc *host = mmc_priv(mmc); ··· 916 645 struct mmc_data *data = mrq->data; 917 646 unsigned int data_size = 0; 918 647 int err; 648 + unsigned long flags; 649 + 650 + mutex_lock(&pcr->pcr_mutex); 651 + spin_lock_irqsave(&host->lock, flags); 652 + 653 + if (host->mrq) 654 + dev_err(sdmmc_dev(host), "error: request already exist\n"); 655 + host->mrq = mrq; 919 656 920 657 if (host->eject) { 921 658 cmd->error = -ENOMEDIUM; ··· 936 657 goto finish; 937 658 } 938 659 939 - mutex_lock(&pcr->pcr_mutex); 940 - 941 660 rtsx_pci_start_run(pcr); 942 661 943 662 rtsx_pci_switch_clock(pcr, host->clock, host->ssc_depth, ··· 944 667 rtsx_pci_write_register(pcr, CARD_SHARE_MODE, 945 668 CARD_SHARE_MASK, CARD_SHARE_48_SD); 946 669 947 - mutex_lock(&host->host_mutex); 948 - host->mrq = mrq; 949 - mutex_unlock(&host->host_mutex); 950 - 951 670 if (mrq->data) 952 671 data_size = data->blocks * data->blksz; 953 672 954 - if (!data_size || mmc_op_multi(cmd->opcode) || 955 - (cmd->opcode == MMC_READ_SINGLE_BLOCK) || 956 - (cmd->opcode == MMC_WRITE_BLOCK)) { 957 - sd_send_cmd_get_rsp(host, cmd); 673 + if (sd_use_muti_rw(cmd)) 674 + host->sg_count = sd_pre_dma_transfer(host, data, NULL); 958 675 959 - if (!cmd->error && data_size) { 960 - sd_rw_multi(host, mrq); 961 - 962 - if (mmc_op_multi(cmd->opcode) && mrq->stop) 963 - sd_send_cmd_get_rsp(host, mrq->stop); 964 - } 965 - } else { 966 - sd_normal_rw(host, mrq); 967 - } 968 - 969 - if (mrq->data) { 970 - if (cmd->error || data->error) 971 - data->bytes_xfered = 0; 676 + if (!data_size || sd_use_muti_rw(cmd)) { 677 + if (mrq->sbc) 678 + sd_send_cmd(host, mrq->sbc); 972 679 
else 973 - data->bytes_xfered = data->blocks * data->blksz; 680 + sd_send_cmd(host, cmd); 681 + spin_unlock_irqrestore(&host->lock, flags); 682 + } else { 683 + spin_unlock_irqrestore(&host->lock, flags); 684 + sd_normal_rw(host, mrq); 685 + tasklet_schedule(&host->finish_tasklet); 974 686 } 975 - 976 - mutex_unlock(&pcr->pcr_mutex); 687 + return; 977 688 978 689 finish: 979 - if (cmd->error) 980 - dev_dbg(sdmmc_dev(host), "cmd->error = %d\n", cmd->error); 981 - 982 - mutex_lock(&host->host_mutex); 983 - host->mrq = NULL; 984 - mutex_unlock(&host->host_mutex); 985 - 986 - mmc_request_done(mmc, mrq); 690 + tasklet_schedule(&host->finish_tasklet); 691 + spin_unlock_irqrestore(&host->lock, flags); 987 692 } 988 693 989 694 static int sd_set_bus_width(struct realtek_pci_sdmmc *host, ··· 1400 1141 } 1401 1142 1402 1143 static const struct mmc_host_ops realtek_pci_sdmmc_ops = { 1144 + .pre_req = sdmmc_pre_req, 1145 + .post_req = sdmmc_post_req, 1403 1146 .request = sdmmc_request, 1404 1147 .set_ios = sdmmc_set_ios, 1405 1148 .get_ro = sdmmc_get_ro, ··· 1465 1204 struct realtek_pci_sdmmc *host; 1466 1205 struct rtsx_pcr *pcr; 1467 1206 struct pcr_handle *handle = pdev->dev.platform_data; 1207 + unsigned long host_addr; 1468 1208 1469 1209 if (!handle) 1470 1210 return -ENXIO; ··· 1489 1227 pcr->slots[RTSX_SD_CARD].p_dev = pdev; 1490 1228 pcr->slots[RTSX_SD_CARD].card_event = rtsx_pci_sdmmc_card_event; 1491 1229 1492 - mutex_init(&host->host_mutex); 1230 + host_addr = (unsigned long)host; 1231 + host->next_data.cookie = 1; 1232 + setup_timer(&host->timer, sd_request_timeout, host_addr); 1233 + tasklet_init(&host->cmd_tasklet, sd_get_rsp, host_addr); 1234 + tasklet_init(&host->data_tasklet, sd_finish_multi_rw, host_addr); 1235 + tasklet_init(&host->finish_tasklet, sd_finish_request, host_addr); 1236 + spin_lock_init(&host->lock); 1493 1237 1238 + pcr->slots[RTSX_SD_CARD].done_transfer = sd_isr_done_transfer; 1494 1239 realtek_init_host(host); 1495 1240 1496 1241 
mmc_add_host(mmc); ··· 1510 1241 struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev); 1511 1242 struct rtsx_pcr *pcr; 1512 1243 struct mmc_host *mmc; 1244 + struct mmc_request *mrq; 1245 + unsigned long flags; 1513 1246 1514 1247 if (!host) 1515 1248 return 0; ··· 1519 1248 pcr = host->pcr; 1520 1249 pcr->slots[RTSX_SD_CARD].p_dev = NULL; 1521 1250 pcr->slots[RTSX_SD_CARD].card_event = NULL; 1251 + pcr->slots[RTSX_SD_CARD].done_transfer = NULL; 1522 1252 mmc = host->mmc; 1253 + mrq = host->mrq; 1523 1254 1524 - mutex_lock(&host->host_mutex); 1255 + spin_lock_irqsave(&host->lock, flags); 1525 1256 if (host->mrq) { 1526 1257 dev_dbg(&(pdev->dev), 1527 1258 "%s: Controller removed during transfer\n", 1528 1259 mmc_hostname(mmc)); 1529 1260 1530 - rtsx_pci_complete_unfinished_transfer(pcr); 1261 + if (mrq->sbc) 1262 + mrq->sbc->error = -ENOMEDIUM; 1263 + if (mrq->cmd) 1264 + mrq->cmd->error = -ENOMEDIUM; 1265 + if (mrq->stop) 1266 + mrq->stop->error = -ENOMEDIUM; 1267 + if (mrq->data) 1268 + mrq->data->error = -ENOMEDIUM; 1531 1269 1532 - host->mrq->cmd->error = -ENOMEDIUM; 1533 - if (host->mrq->stop) 1534 - host->mrq->stop->error = -ENOMEDIUM; 1535 - mmc_request_done(mmc, host->mrq); 1270 + tasklet_schedule(&host->finish_tasklet); 1536 1271 } 1537 - mutex_unlock(&host->host_mutex); 1272 + spin_unlock_irqrestore(&host->lock, flags); 1273 + 1274 + del_timer_sync(&host->timer); 1275 + tasklet_kill(&host->cmd_tasklet); 1276 + tasklet_kill(&host->data_tasklet); 1277 + tasklet_kill(&host->finish_tasklet); 1538 1278 1539 1279 mmc_remove_host(mmc); 1540 1280 host->eject = true;
+1
include/linux/mfd/rtsx_common.h
··· 45 45 struct rtsx_slot { 46 46 struct platform_device *p_dev; 47 47 void (*card_event)(struct platform_device *p_dev); 48 + void (*done_transfer)(struct platform_device *p_dev); 48 49 }; 49 50 50 51 #endif
+6
include/linux/mfd/rtsx_pci.h
··· 943 943 int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout); 944 944 int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist, 945 945 int num_sg, bool read, int timeout); 946 + int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist, 947 + int num_sg, bool read); 948 + int rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist, 949 + int num_sg, bool read); 950 + int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist, 951 + int sg_count, bool read); 946 952 int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len); 947 953 int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len); 948 954 int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card);