Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mmc: sdhci-uhs2: add irq() and others

This is a UHS-II version of sdhci's irq() operation.
It handles UHS-II related command interrupts and errors.

Signed-off-by: Ben Chuang <ben.chuang@genesyslogic.com.tw>
Signed-off-by: AKASHI Takahiro <takahiro.akashi@linaro.org>
Signed-off-by: Victor Shih <victor.shih@genesyslogic.com.tw>
Acked-by: Adrian Hunter <adrian.hunter@intel.com>
Message-ID: <20241018105333.4569-12-victorshihgli@gmail.com>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>

authored by

Victor Shih and committed by
Ulf Hansson
fca267f0 4f412f79

+298 -48
+227
drivers/mmc/host/sdhci-uhs2.c
··· 98 98 } 99 99 EXPORT_SYMBOL_GPL(sdhci_uhs2_reset); 100 100 101 + static void sdhci_uhs2_reset_cmd_data(struct sdhci_host *host) 102 + { 103 + sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); 104 + 105 + if (host->mmc->uhs2_sd_tran) { 106 + sdhci_uhs2_reset(host, SDHCI_UHS2_SW_RESET_SD); 107 + 108 + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 109 + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 110 + sdhci_uhs2_clear_set_irqs(host, SDHCI_INT_ALL_MASK, SDHCI_UHS2_INT_ERROR_MASK); 111 + } 112 + } 113 + 101 114 void sdhci_uhs2_set_power(struct sdhci_host *host, unsigned char mode, unsigned short vdd) 102 115 { 103 116 struct mmc_host *mmc = host->mmc; ··· 544 531 545 532 /*****************************************************************************\ 546 533 * * 534 + * Request done * 535 + * * 536 + \*****************************************************************************/ 537 + 538 + static bool sdhci_uhs2_needs_reset(struct sdhci_host *host, struct mmc_request *mrq) 539 + { 540 + return sdhci_needs_reset(host, mrq) || 541 + (!(host->flags & SDHCI_DEVICE_DEAD) && mrq->data && mrq->data->error); 542 + } 543 + 544 + static bool sdhci_uhs2_request_done(struct sdhci_host *host) 545 + { 546 + unsigned long flags; 547 + struct mmc_request *mrq; 548 + int i; 549 + 550 + spin_lock_irqsave(&host->lock, flags); 551 + 552 + for (i = 0; i < SDHCI_MAX_MRQS; i++) { 553 + mrq = host->mrqs_done[i]; 554 + if (mrq) 555 + break; 556 + } 557 + 558 + if (!mrq) { 559 + spin_unlock_irqrestore(&host->lock, flags); 560 + return true; 561 + } 562 + 563 + /* 564 + * Always unmap the data buffers if they were mapped by 565 + * sdhci_prepare_data() whenever we finish with a request. 566 + * This avoids leaking DMA mappings on error. 567 + */ 568 + if (host->flags & SDHCI_REQ_USE_DMA) 569 + sdhci_request_done_dma(host, mrq); 570 + 571 + /* 572 + * The controller needs a reset of internal state machines 573 + * upon error conditions. 
574 + */ 575 + if (sdhci_uhs2_needs_reset(host, mrq)) { 576 + /* 577 + * Do not finish until command and data lines are available for 578 + * reset. Note there can only be one other mrq, so it cannot 579 + * also be in mrqs_done, otherwise host->cmd and host->data_cmd 580 + * would both be null. 581 + */ 582 + if (host->cmd || host->data_cmd) { 583 + spin_unlock_irqrestore(&host->lock, flags); 584 + return true; 585 + } 586 + 587 + if (mrq->cmd->error || mrq->data->error) 588 + sdhci_uhs2_reset_cmd_data(host); 589 + else 590 + sdhci_uhs2_reset(host, SDHCI_UHS2_SW_RESET_SD); 591 + host->pending_reset = false; 592 + } 593 + 594 + host->mrqs_done[i] = NULL; 595 + 596 + spin_unlock_irqrestore(&host->lock, flags); 597 + 598 + if (host->ops->request_done) 599 + host->ops->request_done(host, mrq); 600 + else 601 + mmc_request_done(host->mmc, mrq); 602 + 603 + return false; 604 + } 605 + 606 + static void sdhci_uhs2_complete_work(struct work_struct *work) 607 + { 608 + struct sdhci_host *host = container_of(work, struct sdhci_host, 609 + complete_work); 610 + 611 + if (!mmc_card_uhs2(host->mmc)) { 612 + sdhci_complete_work(work); 613 + return; 614 + } 615 + 616 + while (!sdhci_uhs2_request_done(host)) 617 + ; 618 + } 619 + 620 + /*****************************************************************************\ 621 + * * 622 + * Interrupt handling * 623 + * * 624 + \*****************************************************************************/ 625 + 626 + static void __sdhci_uhs2_irq(struct sdhci_host *host, u32 uhs2mask) 627 + { 628 + DBG("*** %s got UHS2 error interrupt: 0x%08x\n", 629 + mmc_hostname(host->mmc), uhs2mask); 630 + 631 + if (uhs2mask & SDHCI_UHS2_INT_CMD_ERR_MASK) { 632 + if (!host->cmd) { 633 + pr_err("%s: Got cmd interrupt 0x%08x but no cmd.\n", 634 + mmc_hostname(host->mmc), 635 + (unsigned int)uhs2mask); 636 + sdhci_dumpregs(host); 637 + return; 638 + } 639 + host->cmd->error = -EILSEQ; 640 + if (uhs2mask & SDHCI_UHS2_INT_CMD_TIMEOUT) 641 + host->cmd->error 
= -ETIMEDOUT; 642 + } 643 + 644 + if (uhs2mask & SDHCI_UHS2_INT_DATA_ERR_MASK) { 645 + if (!host->data) { 646 + pr_err("%s: Got data interrupt 0x%08x but no data.\n", 647 + mmc_hostname(host->mmc), 648 + (unsigned int)uhs2mask); 649 + sdhci_dumpregs(host); 650 + return; 651 + } 652 + 653 + if (uhs2mask & SDHCI_UHS2_INT_DEADLOCK_TIMEOUT) { 654 + pr_err("%s: Got deadlock timeout interrupt 0x%08x\n", 655 + mmc_hostname(host->mmc), 656 + (unsigned int)uhs2mask); 657 + host->data->error = -ETIMEDOUT; 658 + } else if (uhs2mask & SDHCI_UHS2_INT_ADMA_ERROR) { 659 + pr_err("%s: ADMA error = 0x %x\n", 660 + mmc_hostname(host->mmc), 661 + sdhci_readb(host, SDHCI_ADMA_ERROR)); 662 + host->data->error = -EIO; 663 + } else { 664 + host->data->error = -EILSEQ; 665 + } 666 + } 667 + } 668 + 669 + u32 sdhci_uhs2_irq(struct sdhci_host *host, u32 intmask) 670 + { 671 + u32 mask = intmask, uhs2mask; 672 + 673 + if (!mmc_card_uhs2(host->mmc)) 674 + goto out; 675 + 676 + if (intmask & SDHCI_INT_ERROR) { 677 + uhs2mask = sdhci_readl(host, SDHCI_UHS2_INT_STATUS); 678 + if (!(uhs2mask & SDHCI_UHS2_INT_ERROR_MASK)) 679 + goto cmd_irq; 680 + 681 + /* Clear error interrupts */ 682 + sdhci_writel(host, uhs2mask & SDHCI_UHS2_INT_ERROR_MASK, 683 + SDHCI_UHS2_INT_STATUS); 684 + 685 + /* Handle error interrupts */ 686 + __sdhci_uhs2_irq(host, uhs2mask); 687 + 688 + /* Caller, sdhci_irq(), doesn't have to care about UHS-2 errors */ 689 + intmask &= ~SDHCI_INT_ERROR; 690 + mask &= SDHCI_INT_ERROR; 691 + } 692 + 693 + cmd_irq: 694 + if (intmask & SDHCI_INT_CMD_MASK) { 695 + /* Clear command interrupt */ 696 + sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK, SDHCI_INT_STATUS); 697 + 698 + /* Caller, sdhci_irq(), doesn't have to care about UHS-2 commands */ 699 + intmask &= ~SDHCI_INT_CMD_MASK; 700 + mask &= SDHCI_INT_CMD_MASK; 701 + } 702 + 703 + /* Clear already-handled interrupts. 
*/ 704 + sdhci_writel(host, mask, SDHCI_INT_STATUS); 705 + 706 + out: 707 + return intmask; 708 + } 709 + EXPORT_SYMBOL_GPL(sdhci_uhs2_irq); 710 + 711 + static irqreturn_t sdhci_uhs2_thread_irq(int irq, void *dev_id) 712 + { 713 + struct sdhci_host *host = dev_id; 714 + struct mmc_command *cmd; 715 + unsigned long flags; 716 + u32 isr; 717 + 718 + if (!mmc_card_uhs2(host->mmc)) 719 + return sdhci_thread_irq(irq, dev_id); 720 + 721 + while (!sdhci_uhs2_request_done(host)) 722 + ; 723 + 724 + spin_lock_irqsave(&host->lock, flags); 725 + 726 + isr = host->thread_isr; 727 + host->thread_isr = 0; 728 + 729 + cmd = host->deferred_cmd; 730 + 731 + spin_unlock_irqrestore(&host->lock, flags); 732 + 733 + if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 734 + struct mmc_host *mmc = host->mmc; 735 + 736 + mmc->ops->card_event(mmc); 737 + mmc_detect_change(mmc, msecs_to_jiffies(200)); 738 + } 739 + 740 + return IRQ_HANDLED; 741 + } 742 + 743 + /*****************************************************************************\ 744 + * * 547 745 * Driver init/exit * 548 746 * * 549 747 \*****************************************************************************/ ··· 843 619 /* overwrite ops */ 844 620 if (mmc->caps2 & MMC_CAP2_SD_UHS2) 845 621 sdhci_uhs2_host_ops_init(host); 622 + 623 + host->complete_work_fn = sdhci_uhs2_complete_work; 624 + host->thread_irq_fn = sdhci_uhs2_thread_irq; 846 625 847 626 /* LED support not implemented for UHS2 */ 848 627 host->quirks |= SDHCI_QUIRK_NO_LED;
+2
drivers/mmc/host/sdhci-uhs2.h
··· 174 174 175 175 struct sdhci_host; 176 176 struct mmc_command; 177 + struct mmc_request; 177 178 178 179 void sdhci_uhs2_dump_regs(struct sdhci_host *host); 179 180 void sdhci_uhs2_reset(struct sdhci_host *host, u16 mask); ··· 183 182 int sdhci_uhs2_add_host(struct sdhci_host *host); 184 183 void sdhci_uhs2_remove_host(struct sdhci_host *host, int dead); 185 184 void sdhci_uhs2_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set); 185 + u32 sdhci_uhs2_irq(struct sdhci_host *host, u32 intmask); 186 186 187 187 #endif /* __SDHCI_UHS2_H */
+61 -48
drivers/mmc/host/sdhci.c
··· 234 234 } 235 235 EXPORT_SYMBOL_GPL(sdhci_reset); 236 236 237 - static bool sdhci_do_reset(struct sdhci_host *host, u8 mask) 237 + bool sdhci_do_reset(struct sdhci_host *host, u8 mask) 238 238 { 239 239 if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { 240 240 struct mmc_host *mmc = host->mmc; ··· 247 247 248 248 return true; 249 249 } 250 + EXPORT_SYMBOL_GPL(sdhci_do_reset); 250 251 251 252 static void sdhci_reset_for_all(struct sdhci_host *host) 252 253 { ··· 1490 1489 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE); 1491 1490 } 1492 1491 1493 - static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq) 1492 + bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq) 1494 1493 { 1495 1494 return (!(host->flags & SDHCI_DEVICE_DEAD) && 1496 1495 ((mrq->cmd && mrq->cmd->error) || ··· 1498 1497 (mrq->data && mrq->data->stop && mrq->data->stop->error) || 1499 1498 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))); 1500 1499 } 1500 + EXPORT_SYMBOL_GPL(sdhci_needs_reset); 1501 1501 1502 1502 static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq) 1503 1503 { ··· 3078 3076 * * 3079 3077 \*****************************************************************************/ 3080 3078 3079 + void sdhci_request_done_dma(struct sdhci_host *host, struct mmc_request *mrq) 3080 + { 3081 + struct mmc_data *data = mrq->data; 3082 + 3083 + if (data && data->host_cookie == COOKIE_MAPPED) { 3084 + if (host->bounce_buffer) { 3085 + /* 3086 + * On reads, copy the bounced data into the 3087 + * sglist 3088 + */ 3089 + if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) { 3090 + unsigned int length = data->bytes_xfered; 3091 + 3092 + if (length > host->bounce_buffer_size) { 3093 + pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n", 3094 + mmc_hostname(host->mmc), 3095 + host->bounce_buffer_size, 3096 + data->bytes_xfered); 3097 + /* Cap it down and continue */ 3098 + length = host->bounce_buffer_size; 
3099 + } 3100 + dma_sync_single_for_cpu(mmc_dev(host->mmc), 3101 + host->bounce_addr, 3102 + host->bounce_buffer_size, 3103 + DMA_FROM_DEVICE); 3104 + sg_copy_from_buffer(data->sg, 3105 + data->sg_len, 3106 + host->bounce_buffer, 3107 + length); 3108 + } else { 3109 + /* No copying, just switch ownership */ 3110 + dma_sync_single_for_cpu(mmc_dev(host->mmc), 3111 + host->bounce_addr, 3112 + host->bounce_buffer_size, 3113 + mmc_get_dma_dir(data)); 3114 + } 3115 + } else { 3116 + /* Unmap the raw data */ 3117 + dma_unmap_sg(mmc_dev(host->mmc), data->sg, 3118 + data->sg_len, 3119 + mmc_get_dma_dir(data)); 3120 + } 3121 + data->host_cookie = COOKIE_UNMAPPED; 3122 + } 3123 + } 3124 + EXPORT_SYMBOL_GPL(sdhci_request_done_dma); 3125 + 3081 3126 static bool sdhci_request_done(struct sdhci_host *host) 3082 3127 { 3083 3128 unsigned long flags; ··· 3189 3140 sdhci_set_mrq_done(host, mrq); 3190 3141 } 3191 3142 3192 - if (data && data->host_cookie == COOKIE_MAPPED) { 3193 - if (host->bounce_buffer) { 3194 - /* 3195 - * On reads, copy the bounced data into the 3196 - * sglist 3197 - */ 3198 - if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) { 3199 - unsigned int length = data->bytes_xfered; 3200 - 3201 - if (length > host->bounce_buffer_size) { 3202 - pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n", 3203 - mmc_hostname(host->mmc), 3204 - host->bounce_buffer_size, 3205 - data->bytes_xfered); 3206 - /* Cap it down and continue */ 3207 - length = host->bounce_buffer_size; 3208 - } 3209 - dma_sync_single_for_cpu( 3210 - mmc_dev(host->mmc), 3211 - host->bounce_addr, 3212 - host->bounce_buffer_size, 3213 - DMA_FROM_DEVICE); 3214 - sg_copy_from_buffer(data->sg, 3215 - data->sg_len, 3216 - host->bounce_buffer, 3217 - length); 3218 - } else { 3219 - /* No copying, just switch ownership */ 3220 - dma_sync_single_for_cpu( 3221 - mmc_dev(host->mmc), 3222 - host->bounce_addr, 3223 - host->bounce_buffer_size, 3224 - mmc_get_dma_dir(data)); 3225 - } 3226 - } 
else { 3227 - /* Unmap the raw data */ 3228 - dma_unmap_sg(mmc_dev(host->mmc), data->sg, 3229 - data->sg_len, 3230 - mmc_get_dma_dir(data)); 3231 - } 3232 - data->host_cookie = COOKIE_UNMAPPED; 3233 - } 3143 + sdhci_request_done_dma(host, mrq); 3234 3144 } 3235 3145 3236 3146 host->mrqs_done[i] = NULL; ··· 3204 3196 return false; 3205 3197 } 3206 3198 3207 - static void sdhci_complete_work(struct work_struct *work) 3199 + void sdhci_complete_work(struct work_struct *work) 3208 3200 { 3209 3201 struct sdhci_host *host = container_of(work, struct sdhci_host, 3210 3202 complete_work); ··· 3212 3204 while (!sdhci_request_done(host)) 3213 3205 ; 3214 3206 } 3207 + EXPORT_SYMBOL_GPL(sdhci_complete_work); 3215 3208 3216 3209 static void sdhci_timeout_timer(struct timer_list *t) 3217 3210 { ··· 3674 3665 return result; 3675 3666 } 3676 3667 3677 - static irqreturn_t sdhci_thread_irq(int irq, void *dev_id) 3668 + irqreturn_t sdhci_thread_irq(int irq, void *dev_id) 3678 3669 { 3679 3670 struct sdhci_host *host = dev_id; 3680 3671 struct mmc_command *cmd; ··· 3704 3695 3705 3696 return IRQ_HANDLED; 3706 3697 } 3698 + EXPORT_SYMBOL_GPL(sdhci_thread_irq); 3707 3699 3708 3700 /*****************************************************************************\ 3709 3701 * * ··· 4076 4066 host->max_adma = 65536; 4077 4067 4078 4068 host->max_timeout_count = 0xE; 4069 + 4070 + host->complete_work_fn = sdhci_complete_work; 4071 + host->thread_irq_fn = sdhci_thread_irq; 4079 4072 4080 4073 return host; 4081 4074 } ··· 4844 4831 if (!host->complete_wq) 4845 4832 return -ENOMEM; 4846 4833 4847 - INIT_WORK(&host->complete_work, sdhci_complete_work); 4834 + INIT_WORK(&host->complete_work, host->complete_work_fn); 4848 4835 4849 4836 timer_setup(&host->timer, sdhci_timeout_timer, 0); 4850 4837 timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0); ··· 4853 4840 4854 4841 sdhci_init(host, 0); 4855 4842 4856 - ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq, 4843 + ret = 
request_threaded_irq(host->irq, sdhci_irq, host->thread_irq_fn, 4857 4844 IRQF_SHARED, mmc_hostname(mmc), host); 4858 4845 if (ret) { 4859 4846 pr_err("%s: Failed to request IRQ %d: %d\n",
+8
drivers/mmc/host/sdhci.h
··· 625 625 struct timer_list timer; /* Timer for timeouts */ 626 626 struct timer_list data_timer; /* Timer for data timeouts */ 627 627 628 + void (*complete_work_fn)(struct work_struct *work); 629 + irqreturn_t (*thread_irq_fn)(int irq, void *dev_id); 630 + 628 631 #if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA) 629 632 struct dma_chan *rx_chan; 630 633 struct dma_chan *tx_chan; ··· 830 827 __sdhci_read_caps(host, NULL, NULL, NULL); 831 828 } 832 829 830 + bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq); 833 831 u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock, 834 832 unsigned int *actual_clock); 835 833 void sdhci_set_clock(struct sdhci_host *host, unsigned int clock); ··· 849 845 int sdhci_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq); 850 846 void sdhci_set_bus_width(struct sdhci_host *host, int width); 851 847 void sdhci_reset(struct sdhci_host *host, u8 mask); 848 + bool sdhci_do_reset(struct sdhci_host *host, u8 mask); 852 849 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing); 853 850 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode); 854 851 int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode); ··· 859 854 int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, 860 855 struct mmc_ios *ios); 861 856 void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable); 857 + void sdhci_request_done_dma(struct sdhci_host *host, struct mmc_request *mrq); 858 + void sdhci_complete_work(struct work_struct *work); 859 + irqreturn_t sdhci_thread_irq(int irq, void *dev_id); 862 860 void sdhci_adma_write_desc(struct sdhci_host *host, void **desc, 863 861 dma_addr_t addr, int len, unsigned int cmd); 864 862