Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v5.7 (1159 lines, 29 kB)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/platform_device.h>
#include <linux/ktime.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

#include "cqhci.h"

#define DCMD_SLOT 31
#define NUM_SLOTS 32

struct cqhci_slot {
	struct mmc_request *mrq;
	unsigned int flags;
#define CQHCI_EXTERNAL_TIMEOUT	BIT(0)
#define CQHCI_COMPLETED		BIT(1)
#define CQHCI_HOST_CRC		BIT(2)
#define CQHCI_HOST_TIMEOUT	BIT(3)
#define CQHCI_HOST_OTHER	BIT(4)
};

static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->desc_base + (tag * cq_host->slot_sz);
}

static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag)
{
	u8 *desc = get_desc(cq_host, tag);

	return desc + cq_host->task_desc_len;
}

static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->trans_desc_dma_base +
		(cq_host->mmc->max_segs * tag *
		 cq_host->trans_desc_len);
}

static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->trans_desc_base +
		(cq_host->trans_desc_len * cq_host->mmc->max_segs * tag);
}

static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
	u8 *link_temp;
	dma_addr_t trans_temp;

	link_temp = get_link_desc(cq_host, tag);
	trans_temp = get_trans_desc_dma(cq_host, tag);

	memset(link_temp, 0, cq_host->link_desc_len);
	if (cq_host->link_desc_len > 8)
		*(link_temp + 8) = 0;

	if (tag == DCMD_SLOT && (cq_host->mmc->caps2 & MMC_CAP2_CQE_DCMD)) {
		*link_temp = CQHCI_VALID(0) | CQHCI_ACT(0) | CQHCI_END(1);
		return;
	}

	*link_temp = CQHCI_VALID(1) | CQHCI_ACT(0x6) | CQHCI_END(0);

	if (cq_host->dma64) {
		__le64 *data_addr = (__le64 __force *)(link_temp + 4);

		data_addr[0] = cpu_to_le64(trans_temp);
	} else {
		__le32 *data_addr = (__le32 __force *)(link_temp + 4);

		data_addr[0] = cpu_to_le32(trans_temp);
	}
}

static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
{
	cqhci_writel(cq_host, set, CQHCI_ISTE);
	cqhci_writel(cq_host, set, CQHCI_ISGE);
}

#define DRV_NAME "cqhci"

#define CQHCI_DUMP(f, x...) \
\ 99 pr_err("%s: " DRV_NAME ": " f, mmc_hostname(mmc), ## x) 100 101static void cqhci_dumpregs(struct cqhci_host *cq_host) 102{ 103 struct mmc_host *mmc = cq_host->mmc; 104 105 CQHCI_DUMP("============ CQHCI REGISTER DUMP ===========\n"); 106 107 CQHCI_DUMP("Caps: 0x%08x | Version: 0x%08x\n", 108 cqhci_readl(cq_host, CQHCI_CAP), 109 cqhci_readl(cq_host, CQHCI_VER)); 110 CQHCI_DUMP("Config: 0x%08x | Control: 0x%08x\n", 111 cqhci_readl(cq_host, CQHCI_CFG), 112 cqhci_readl(cq_host, CQHCI_CTL)); 113 CQHCI_DUMP("Int stat: 0x%08x | Int enab: 0x%08x\n", 114 cqhci_readl(cq_host, CQHCI_IS), 115 cqhci_readl(cq_host, CQHCI_ISTE)); 116 CQHCI_DUMP("Int sig: 0x%08x | Int Coal: 0x%08x\n", 117 cqhci_readl(cq_host, CQHCI_ISGE), 118 cqhci_readl(cq_host, CQHCI_IC)); 119 CQHCI_DUMP("TDL base: 0x%08x | TDL up32: 0x%08x\n", 120 cqhci_readl(cq_host, CQHCI_TDLBA), 121 cqhci_readl(cq_host, CQHCI_TDLBAU)); 122 CQHCI_DUMP("Doorbell: 0x%08x | TCN: 0x%08x\n", 123 cqhci_readl(cq_host, CQHCI_TDBR), 124 cqhci_readl(cq_host, CQHCI_TCN)); 125 CQHCI_DUMP("Dev queue: 0x%08x | Dev Pend: 0x%08x\n", 126 cqhci_readl(cq_host, CQHCI_DQS), 127 cqhci_readl(cq_host, CQHCI_DPT)); 128 CQHCI_DUMP("Task clr: 0x%08x | SSC1: 0x%08x\n", 129 cqhci_readl(cq_host, CQHCI_TCLR), 130 cqhci_readl(cq_host, CQHCI_SSC1)); 131 CQHCI_DUMP("SSC2: 0x%08x | DCMD rsp: 0x%08x\n", 132 cqhci_readl(cq_host, CQHCI_SSC2), 133 cqhci_readl(cq_host, CQHCI_CRDCT)); 134 CQHCI_DUMP("RED mask: 0x%08x | TERRI: 0x%08x\n", 135 cqhci_readl(cq_host, CQHCI_RMEM), 136 cqhci_readl(cq_host, CQHCI_TERRI)); 137 CQHCI_DUMP("Resp idx: 0x%08x | Resp arg: 0x%08x\n", 138 cqhci_readl(cq_host, CQHCI_CRI), 139 cqhci_readl(cq_host, CQHCI_CRA)); 140 141 if (cq_host->ops->dumpregs) 142 cq_host->ops->dumpregs(mmc); 143 else 144 CQHCI_DUMP(": ===========================================\n"); 145} 146 147/** 148 * The allocated descriptor table for task, link & transfer descritors 149 * looks like: 150 * |----------| 151 * |task desc | |->|----------| 152 * |----------| | |trans desc| 153 * |link desc-|->| |----------| 154 * |----------| . 155 * . . 156 * no. of slots max-segs 157 * . |----------| 158 * |----------| 159 * The idea here is to create the [task+trans] table and mark & point the 160 * link desc to the transfer desc table on a per slot basis. 
 */
static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
{
	int i = 0;

	/* task descriptor can be 64/128 bit irrespective of arch */
	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
		cqhci_writel(cq_host, cqhci_readl(cq_host, CQHCI_CFG) |
			     CQHCI_TASK_DESC_SZ, CQHCI_CFG);
		cq_host->task_desc_len = 16;
	} else {
		cq_host->task_desc_len = 8;
	}

	/*
	 * 96 bits length of transfer desc instead of 128 bits which means
	 * ADMA would expect next valid descriptor at the 96th bit
	 * or 128th bit
	 */
	if (cq_host->dma64) {
		if (cq_host->quirks & CQHCI_QUIRK_SHORT_TXFR_DESC_SZ)
			cq_host->trans_desc_len = 12;
		else
			cq_host->trans_desc_len = 16;
		cq_host->link_desc_len = 16;
	} else {
		cq_host->trans_desc_len = 8;
		cq_host->link_desc_len = 8;
	}

	/* total size of a slot: 1 task & 1 transfer (link) */
	cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;

	cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;

	cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
		cq_host->mmc->cqe_qdepth;

	pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
		 mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
		 cq_host->slot_sz);

	/*
	 * allocate a dma-mapped chunk of memory for the descriptors
	 * allocate a dma-mapped chunk of memory for link descriptors
	 * setup each link-desc memory offset per slot-number to
	 * the descriptor table.
	 */
	cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
						 cq_host->desc_size,
						 &cq_host->desc_dma_base,
						 GFP_KERNEL);
	if (!cq_host->desc_base)
		return -ENOMEM;

	cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
						       cq_host->data_size,
						       &cq_host->trans_desc_dma_base,
						       GFP_KERNEL);
	if (!cq_host->trans_desc_base) {
		dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size,
				   cq_host->desc_base,
				   cq_host->desc_dma_base);
		cq_host->desc_base = NULL;
		cq_host->desc_dma_base = 0;
		return -ENOMEM;
	}

	pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
		 mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,
		 (unsigned long long)cq_host->desc_dma_base,
		 (unsigned long long)cq_host->trans_desc_dma_base);

	for (; i < (cq_host->num_slots); i++)
		setup_trans_desc(cq_host, i);

	return 0;
}

static void __cqhci_enable(struct cqhci_host *cq_host)
{
	struct mmc_host *mmc = cq_host->mmc;
	u32 cqcfg;

	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);

	/* Configuration must not be changed while enabled */
	if (cqcfg & CQHCI_ENABLE) {
		cqcfg &= ~CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
	}

	cqcfg &= ~(CQHCI_DCMD | CQHCI_TASK_DESC_SZ);

	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
		cqcfg |= CQHCI_DCMD;

	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128)
		cqcfg |= CQHCI_TASK_DESC_SZ;

	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base),
		     CQHCI_TDLBA);
	cqhci_writel(cq_host, upper_32_bits(cq_host->desc_dma_base),
		     CQHCI_TDLBAU);

	cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2);

	cqhci_set_irqs(cq_host, 0);

	cqcfg |= CQHCI_ENABLE;

	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	mmc->cqe_on = true;

	if (cq_host->ops->enable)
		cq_host->ops->enable(mmc);

	/* Ensure all writes are done before interrupts are enabled */
	wmb();

	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

	cq_host->activated = true;
}

static void __cqhci_disable(struct cqhci_host *cq_host)
{
	u32 cqcfg;

	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
	cqcfg &= ~CQHCI_ENABLE;
	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	cq_host->mmc->cqe_on = false;

	cq_host->activated = false;
}

int cqhci_deactivate(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (cq_host->enabled && cq_host->activated)
		__cqhci_disable(cq_host);

	return 0;
}
EXPORT_SYMBOL(cqhci_deactivate);

int cqhci_resume(struct mmc_host *mmc)
{
	/* Re-enable is done upon first request */
	return 0;
}
EXPORT_SYMBOL(cqhci_resume);

static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int err;

	if (!card->ext_csd.cmdq_en)
		return -EINVAL;

	if (cq_host->enabled)
		return 0;

	cq_host->rca = card->rca;

	err = cqhci_host_alloc_tdl(cq_host);
	if (err) {
		pr_err("%s: Failed to enable CQE, error %d\n",
		       mmc_hostname(mmc), err);
		return err;
	}

	__cqhci_enable(cq_host);

	cq_host->enabled = true;

#ifdef DEBUG
	cqhci_dumpregs(cq_host);
#endif
	return 0;
}

/* CQHCI is idle and should halt immediately, so set a small timeout */
#define CQHCI_OFF_TIMEOUT 100

static u32 cqhci_read_ctl(struct cqhci_host *cq_host)
{
	return cqhci_readl(cq_host, CQHCI_CTL);
}

static void cqhci_off(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	u32 reg;
	int err;

	if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
		return;

	if (cq_host->ops->disable)
		cq_host->ops->disable(mmc, false);

	cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);

	err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg,
				 reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT);
	if (err < 0)
		pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
	else
		pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));

	mmc->cqe_on = false;
}

static void cqhci_disable(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (!cq_host->enabled)
		return;

	cqhci_off(mmc);

	__cqhci_disable(cq_host);

	dmam_free_coherent(mmc_dev(mmc), cq_host->data_size,
			   cq_host->trans_desc_base,
			   cq_host->trans_desc_dma_base);

	dmam_free_coherent(mmc_dev(mmc), cq_host->desc_size,
			   cq_host->desc_base,
			   cq_host->desc_dma_base);

	cq_host->trans_desc_base = NULL;
	cq_host->desc_base = NULL;

	cq_host->enabled = false;
}

static void cqhci_prep_task_desc(struct mmc_request *mrq,
				 u64 *data, bool intr)
{
	u32 req_flags = mrq->data->flags;

	*data = CQHCI_VALID(1) |
		CQHCI_END(1) |
		CQHCI_INT(intr) |
		CQHCI_ACT(0x5) |
		CQHCI_FORCED_PROG(!!(req_flags & MMC_DATA_FORCED_PRG)) |
		CQHCI_DATA_TAG(!!(req_flags & MMC_DATA_DAT_TAG)) |
		CQHCI_DATA_DIR(!!(req_flags & MMC_DATA_READ)) |
		CQHCI_PRIORITY(!!(req_flags & MMC_DATA_PRIO)) |
		CQHCI_QBAR(!!(req_flags & MMC_DATA_QBR)) |
		CQHCI_REL_WRITE(!!(req_flags & MMC_DATA_REL_WR)) |
		CQHCI_BLK_COUNT(mrq->data->blocks) |
		CQHCI_BLK_ADDR((u64)mrq->data->blk_addr);

	pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx\n",
		 mmc_hostname(mrq->host), mrq->tag, (unsigned long long)*data);
}

static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq)
{
	int sg_count;
	struct mmc_data *data = mrq->data;

	if (!data)
		return -EINVAL;

	sg_count = dma_map_sg(mmc_dev(host), data->sg,
			      data->sg_len,
			      (data->flags & MMC_DATA_WRITE) ?
			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!sg_count) {
		pr_err("%s: sg-len: %d\n", __func__, data->sg_len);
		return -ENOMEM;
	}

	return sg_count;
}

static void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
				bool dma64)
{
	__le32 *attr = (__le32 __force *)desc;

	*attr = (CQHCI_VALID(1) |
		 CQHCI_END(end ? 1 : 0) |
		 CQHCI_INT(0) |
		 CQHCI_ACT(0x4) |
		 CQHCI_DAT_LENGTH(len));

	if (dma64) {
		__le64 *dataddr = (__le64 __force *)(desc + 4);

		dataddr[0] = cpu_to_le64(addr);
	} else {
		__le32 *dataddr = (__le32 __force *)(desc + 4);

		dataddr[0] = cpu_to_le32(addr);
	}
}

static int cqhci_prep_tran_desc(struct mmc_request *mrq,
				struct cqhci_host *cq_host, int tag)
{
	struct mmc_data *data = mrq->data;
	int i, sg_count, len;
	bool end = false;
	bool dma64 = cq_host->dma64;
	dma_addr_t addr;
	u8 *desc;
	struct scatterlist *sg;

	sg_count = cqhci_dma_map(mrq->host, mrq);
	if (sg_count < 0) {
		pr_err("%s: %s: unable to map sg lists, %d\n",
		       mmc_hostname(mrq->host), __func__, sg_count);
		return sg_count;
	}

	desc = get_trans_desc(cq_host, tag);

	for_each_sg(data->sg, sg, sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((i+1) == sg_count)
			end = true;
		cqhci_set_tran_desc(desc, addr, len, end, dma64);
		desc += cq_host->trans_desc_len;
	}

	return 0;
}

static void cqhci_prep_dcmd_desc(struct mmc_host *mmc,
				 struct mmc_request *mrq)
{
	u64 *task_desc = NULL;
	u64 data = 0;
	u8 resp_type;
	u8 *desc;
	__le64 *dataddr;
	struct cqhci_host *cq_host = mmc->cqe_private;
	u8 timing;

	if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) {
		resp_type = 0x0;
		timing = 0x1;
	} else {
		if (mrq->cmd->flags & MMC_RSP_R1B) {
			resp_type = 0x3;
			timing = 0x0;
		} else {
			resp_type = 0x2;
			timing = 0x1;
		}
	}

	task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot);
	memset(task_desc, 0, cq_host->task_desc_len);
	data |= (CQHCI_VALID(1) |
		 CQHCI_END(1) |
		 CQHCI_INT(1) |
		 CQHCI_QBAR(1) |
		 CQHCI_ACT(0x5) |
		 CQHCI_CMD_INDEX(mrq->cmd->opcode) |
		 CQHCI_CMD_TIMING(timing) | CQHCI_RESP_TYPE(resp_type));
	if (cq_host->ops->update_dcmd_desc)
		cq_host->ops->update_dcmd_desc(mmc, mrq, &data);
	*task_desc |= data;
	desc = (u8 *)task_desc;
	pr_debug("%s: cqhci: dcmd: cmd: %d timing: %d resp: %d\n",
		 mmc_hostname(mmc), mrq->cmd->opcode, timing, resp_type);
	dataddr = (__le64 __force *)(desc + 4);
	dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg);

}

static void cqhci_post_req(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (data) {
		dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len,
			     (data->flags & MMC_DATA_READ) ?
			     DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static inline int cqhci_tag(struct mmc_request *mrq)
{
	return mrq->cmd ? DCMD_SLOT : mrq->tag;
}

static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	int err = 0;
	u64 data = 0;
	u64 *task_desc = NULL;
	int tag = cqhci_tag(mrq);
	struct cqhci_host *cq_host = mmc->cqe_private;
	unsigned long flags;

	if (!cq_host->enabled) {
		pr_err("%s: cqhci: not enabled\n", mmc_hostname(mmc));
		return -EINVAL;
	}

	/* First request after resume has to re-enable */
	if (!cq_host->activated)
		__cqhci_enable(cq_host);

	if (!mmc->cqe_on) {
		cqhci_writel(cq_host, 0, CQHCI_CTL);
		mmc->cqe_on = true;
		pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
		if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
			pr_err("%s: cqhci: CQE failed to exit halt state\n",
			       mmc_hostname(mmc));
		}
		if (cq_host->ops->enable)
			cq_host->ops->enable(mmc);
	}

	if (mrq->data) {
		task_desc = (__le64 __force *)get_desc(cq_host, tag);
		cqhci_prep_task_desc(mrq, &data, 1);
		*task_desc = cpu_to_le64(data);
		err = cqhci_prep_tran_desc(mrq, cq_host, tag);
		if (err) {
			pr_err("%s: cqhci: failed to setup tx desc: %d\n",
			       mmc_hostname(mmc), err);
			return err;
		}
	} else {
		cqhci_prep_dcmd_desc(mmc, mrq);
	}

	spin_lock_irqsave(&cq_host->lock, flags);

	if (cq_host->recovery_halt) {
		err = -EBUSY;
		goto out_unlock;
	}

	cq_host->slot[tag].mrq = mrq;
	cq_host->slot[tag].flags = 0;

	cq_host->qcnt += 1;
	/* Make sure descriptors are ready before ringing the doorbell */
	wmb();
	cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
	if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
		pr_debug("%s: cqhci: doorbell not set for tag %d\n",
			 mmc_hostname(mmc), tag);
out_unlock:
	spin_unlock_irqrestore(&cq_host->lock, flags);

	if (err)
		cqhci_post_req(mmc, mrq);

	return err;
}

static void cqhci_recovery_needed(struct mmc_host *mmc, struct mmc_request *mrq,
				  bool notify)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (!cq_host->recovery_halt) {
		cq_host->recovery_halt = true;
		pr_debug("%s: cqhci: recovery needed\n", mmc_hostname(mmc));
		wake_up(&cq_host->wait_queue);
		if (notify && mrq->recovery_notifier)
			mrq->recovery_notifier(mrq);
	}
}

static unsigned int cqhci_error_flags(int error1, int error2)
{
	int error = error1 ? error1 : error2;

	switch (error) {
	case -EILSEQ:
		return CQHCI_HOST_CRC;
	case -ETIMEDOUT:
		return CQHCI_HOST_TIMEOUT;
	default:
		return CQHCI_HOST_OTHER;
	}
}

static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error,
			    int data_error)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct cqhci_slot *slot;
	u32 terri;
	int tag;

	spin_lock(&cq_host->lock);

	terri = cqhci_readl(cq_host, CQHCI_TERRI);

	pr_debug("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
		 mmc_hostname(mmc), status, cmd_error, data_error, terri);

	/* Forget about errors when recovery has already been triggered */
	if (cq_host->recovery_halt)
		goto out_unlock;

	if (!cq_host->qcnt) {
		WARN_ONCE(1, "%s: cqhci: error when idle. IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
			  mmc_hostname(mmc), status, cmd_error, data_error,
			  terri);
		goto out_unlock;
	}

	if (CQHCI_TERRI_C_VALID(terri)) {
		tag = CQHCI_TERRI_C_TASK(terri);
		slot = &cq_host->slot[tag];
		if (slot->mrq) {
			slot->flags = cqhci_error_flags(cmd_error, data_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
		}
	}

	if (CQHCI_TERRI_D_VALID(terri)) {
		tag = CQHCI_TERRI_D_TASK(terri);
		slot = &cq_host->slot[tag];
		if (slot->mrq) {
			slot->flags = cqhci_error_flags(data_error, cmd_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
		}
	}

	if (!cq_host->recovery_halt) {
		/*
		 * The only way to guarantee forward progress is to mark at
		 * least one task in error, so if none is indicated, pick one.
		 */
		for (tag = 0; tag < NUM_SLOTS; tag++) {
			slot = &cq_host->slot[tag];
			if (!slot->mrq)
				continue;
			slot->flags = cqhci_error_flags(data_error, cmd_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
			break;
		}
	}

out_unlock:
	spin_unlock(&cq_host->lock);
}

static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct cqhci_slot *slot = &cq_host->slot[tag];
	struct mmc_request *mrq = slot->mrq;
	struct mmc_data *data;

	if (!mrq) {
		WARN_ONCE(1, "%s: cqhci: spurious TCN for tag %d\n",
			  mmc_hostname(mmc), tag);
		return;
	}

	/* No completions allowed during recovery */
	if (cq_host->recovery_halt) {
		slot->flags |= CQHCI_COMPLETED;
		return;
	}

	slot->mrq = NULL;

	cq_host->qcnt -= 1;

	data = mrq->data;
	if (data) {
		if (data->error)
			data->bytes_xfered = 0;
		else
			data->bytes_xfered = data->blksz * data->blocks;
	}

	mmc_cqe_request_done(mmc, mrq);
}

irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
		      int data_error)
{
	u32 status;
	unsigned long tag = 0, comp_status;
	struct cqhci_host *cq_host = mmc->cqe_private;

	status = cqhci_readl(cq_host, CQHCI_IS);
	cqhci_writel(cq_host, status, CQHCI_IS);

	pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status);

	if ((status & CQHCI_IS_RED) || cmd_error || data_error)
		cqhci_error_irq(mmc, status, cmd_error, data_error);

	if (status & CQHCI_IS_TCC) {
		/* read TCN and complete the request */
		comp_status = cqhci_readl(cq_host, CQHCI_TCN);
		cqhci_writel(cq_host, comp_status, CQHCI_TCN);
		pr_debug("%s: cqhci: TCN: 0x%08lx\n",
			 mmc_hostname(mmc), comp_status);

		spin_lock(&cq_host->lock);

		for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
			/* complete the corresponding mrq */
			pr_debug("%s: cqhci: completing tag %lu\n",
				 mmc_hostname(mmc), tag);
			cqhci_finish_mrq(mmc, tag);
		}

		if (cq_host->waiting_for_idle && !cq_host->qcnt) {
			cq_host->waiting_for_idle = false;
			wake_up(&cq_host->wait_queue);
		}

		spin_unlock(&cq_host->lock);
	}

	if (status & CQHCI_IS_TCL)
		wake_up(&cq_host->wait_queue);

	if (status & CQHCI_IS_HAC)
		wake_up(&cq_host->wait_queue);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(cqhci_irq);

static bool cqhci_is_idle(struct cqhci_host *cq_host, int *ret)
{
	unsigned long flags;
	bool is_idle;

	spin_lock_irqsave(&cq_host->lock, flags);
	is_idle = !cq_host->qcnt || cq_host->recovery_halt;
	*ret = cq_host->recovery_halt ? -EBUSY : 0;
	cq_host->waiting_for_idle = !is_idle;
	spin_unlock_irqrestore(&cq_host->lock, flags);

	return is_idle;
}

static int cqhci_wait_for_idle(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int ret;

	wait_event(cq_host->wait_queue, cqhci_is_idle(cq_host, &ret));

	return ret;
}

static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq,
			  bool *recovery_needed)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int tag = cqhci_tag(mrq);
	struct cqhci_slot *slot = &cq_host->slot[tag];
	unsigned long flags;
	bool timed_out;

	spin_lock_irqsave(&cq_host->lock, flags);
	timed_out = slot->mrq == mrq;
	if (timed_out) {
		slot->flags |= CQHCI_EXTERNAL_TIMEOUT;
		cqhci_recovery_needed(mmc, mrq, false);
		*recovery_needed = cq_host->recovery_halt;
	}
	spin_unlock_irqrestore(&cq_host->lock, flags);

	if (timed_out) {
		pr_err("%s: cqhci: timeout for tag %d\n",
		       mmc_hostname(mmc), tag);
		cqhci_dumpregs(cq_host);
	}

	return timed_out;
}

static bool cqhci_tasks_cleared(struct cqhci_host *cq_host)
{
	return !(cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_CLEAR_ALL_TASKS);
}

static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	bool ret;
	u32 ctl;

	cqhci_set_irqs(cq_host, CQHCI_IS_TCL);

	ctl = cqhci_readl(cq_host, CQHCI_CTL);
	ctl |= CQHCI_CLEAR_ALL_TASKS;
	cqhci_writel(cq_host, ctl, CQHCI_CTL);

	wait_event_timeout(cq_host->wait_queue, cqhci_tasks_cleared(cq_host),
			   msecs_to_jiffies(timeout) + 1);

	cqhci_set_irqs(cq_host, 0);

	ret = cqhci_tasks_cleared(cq_host);

	if (!ret)
		pr_debug("%s: cqhci: Failed to clear tasks\n",
			 mmc_hostname(mmc));

	return ret;
}

static bool cqhci_halted(struct cqhci_host *cq_host)
{
	return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT;
}

static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	bool ret;
	u32 ctl;

	if (cqhci_halted(cq_host))
		return true;

	cqhci_set_irqs(cq_host, CQHCI_IS_HAC);

	ctl = cqhci_readl(cq_host, CQHCI_CTL);
	ctl |= CQHCI_HALT;
	cqhci_writel(cq_host, ctl, CQHCI_CTL);

	wait_event_timeout(cq_host->wait_queue, cqhci_halted(cq_host),
			   msecs_to_jiffies(timeout) + 1);

	cqhci_set_irqs(cq_host, 0);

	ret = cqhci_halted(cq_host);

	if (!ret)
		pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));

	return ret;
}

/*
 * After halting we expect to be able to use the command line. We interpret the
 * failure to halt to mean the data lines might still be in use (and the upper
 * layers will need to send a STOP command), so we set the timeout based on a
 * generous command timeout.
 */
#define CQHCI_START_HALT_TIMEOUT	5

static void cqhci_recovery_start(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

	WARN_ON(!cq_host->recovery_halt);

	cqhci_halt(mmc, CQHCI_START_HALT_TIMEOUT);

	if (cq_host->ops->disable)
		cq_host->ops->disable(mmc, true);

	mmc->cqe_on = false;
}

static int cqhci_error_from_flags(unsigned int flags)
{
	if (!flags)
		return 0;

	/* CRC errors might indicate re-tuning so prefer to report that */
	if (flags & CQHCI_HOST_CRC)
		return -EILSEQ;

	if (flags & (CQHCI_EXTERNAL_TIMEOUT | CQHCI_HOST_TIMEOUT))
		return -ETIMEDOUT;

	return -EIO;
}

static void cqhci_recover_mrq(struct cqhci_host *cq_host, unsigned int tag)
{
	struct cqhci_slot *slot = &cq_host->slot[tag];
	struct mmc_request *mrq = slot->mrq;
	struct mmc_data *data;

	if (!mrq)
		return;

	slot->mrq = NULL;

	cq_host->qcnt -= 1;

	data = mrq->data;
	if (data) {
		data->bytes_xfered = 0;
		data->error = cqhci_error_from_flags(slot->flags);
	} else {
		mrq->cmd->error = cqhci_error_from_flags(slot->flags);
	}

	mmc_cqe_request_done(cq_host->mmc, mrq);
}

static void cqhci_recover_mrqs(struct cqhci_host *cq_host)
{
	int i;

	for (i = 0; i < cq_host->num_slots; i++)
		cqhci_recover_mrq(cq_host, i);
}

/*
 * By now the command and data lines should be unused so there is no reason for
 * CQHCI to take a long time to halt, but if it doesn't halt there could be
 * problems clearing tasks, so be generous.
 */
#define CQHCI_FINISH_HALT_TIMEOUT	20

/* CQHCI could be expected to clear its internal state pretty quickly */
#define CQHCI_CLEAR_TIMEOUT		20

static void cqhci_recovery_finish(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	unsigned long flags;
	u32 cqcfg;
	bool ok;

	pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

	WARN_ON(!cq_host->recovery_halt);

	ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);

	if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
		ok = false;

	/*
	 * The specification contradicts itself, by saying that tasks cannot be
	 * cleared if CQHCI does not halt, but if CQHCI does not halt, it should
	 * be disabled/re-enabled, but not to disable before clearing tasks.
	 * Have a go anyway.
	 */
	if (!ok) {
		pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
		cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
		cqcfg &= ~CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
		cqcfg |= CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
		/* Be sure that there are no tasks */
		ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
		if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
			ok = false;
		WARN_ON(!ok);
	}

	cqhci_recover_mrqs(cq_host);

	WARN_ON(cq_host->qcnt);

	spin_lock_irqsave(&cq_host->lock, flags);
	cq_host->qcnt = 0;
	cq_host->recovery_halt = false;
	mmc->cqe_on = false;
	spin_unlock_irqrestore(&cq_host->lock, flags);

	/* Ensure all writes are done before interrupts are re-enabled */
	wmb();

	cqhci_writel(cq_host, CQHCI_IS_HAC | CQHCI_IS_TCL, CQHCI_IS);

	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

	pr_debug("%s: cqhci: recovery done\n", mmc_hostname(mmc));
}

static const struct mmc_cqe_ops cqhci_cqe_ops = {
	.cqe_enable = cqhci_enable,
	.cqe_disable = cqhci_disable,
	.cqe_request = cqhci_request,
	.cqe_post_req = cqhci_post_req,
	.cqe_off = cqhci_off,
	.cqe_wait_for_idle = cqhci_wait_for_idle,
	.cqe_timeout = cqhci_timeout,
	.cqe_recovery_start = cqhci_recovery_start,
	.cqe_recovery_finish = cqhci_recovery_finish,
};

struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev)
{
	struct cqhci_host *cq_host;
	struct resource *cqhci_memres = NULL;

	/* check and setup CMDQ interface */
	cqhci_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						    "cqhci");
	if (!cqhci_memres) {
		dev_dbg(&pdev->dev, "CMDQ not supported\n");
		return ERR_PTR(-EINVAL);
	}

	cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
	if (!cq_host)
		return ERR_PTR(-ENOMEM);
	cq_host->mmio = devm_ioremap(&pdev->dev,
				     cqhci_memres->start,
				     resource_size(cqhci_memres));
	if (!cq_host->mmio) {
		dev_err(&pdev->dev, "failed to remap cqhci regs\n");
		return ERR_PTR(-EBUSY);
	}
	dev_dbg(&pdev->dev, "CMDQ ioremap: done\n");

	return cq_host;
}
EXPORT_SYMBOL(cqhci_pltfm_init);

static unsigned int cqhci_ver_major(struct cqhci_host *cq_host)
{
	return CQHCI_VER_MAJOR(cqhci_readl(cq_host, CQHCI_VER));
}

static unsigned int cqhci_ver_minor(struct cqhci_host *cq_host)
{
	u32 ver = cqhci_readl(cq_host, CQHCI_VER);

	return CQHCI_VER_MINOR1(ver) * 10 + CQHCI_VER_MINOR2(ver);
}

int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
	       bool dma64)
{
	int err;

	cq_host->dma64 = dma64;
	cq_host->mmc = mmc;
	cq_host->mmc->cqe_private = cq_host;

	cq_host->num_slots = NUM_SLOTS;
	cq_host->dcmd_slot = DCMD_SLOT;

	mmc->cqe_ops = &cqhci_cqe_ops;

	mmc->cqe_qdepth = NUM_SLOTS;
	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
		mmc->cqe_qdepth -= 1;

	cq_host->slot = devm_kcalloc(mmc_dev(mmc), cq_host->num_slots,
				     sizeof(*cq_host->slot), GFP_KERNEL);
	if (!cq_host->slot) {
		err = -ENOMEM;
		goto out_err;
	}

	spin_lock_init(&cq_host->lock);

	init_completion(&cq_host->halt_comp);
	init_waitqueue_head(&cq_host->wait_queue);

	pr_info("%s: CQHCI version %u.%02u\n",
		mmc_hostname(mmc), cqhci_ver_major(cq_host),
		cqhci_ver_minor(cq_host));

	return 0;

out_err:
	pr_err("%s: CQHCI version %u.%02u failed to initialize, error %d\n",
	       mmc_hostname(mmc), cqhci_ver_major(cq_host),
	       cqhci_ver_minor(cq_host), err);
	return err;
}
EXPORT_SYMBOL(cqhci_init);

MODULE_AUTHOR("Venkat Gopalakrishnan <venkatg@codeaurora.org>");
MODULE_DESCRIPTION("Command Queue Host Controller Interface driver");
MODULE_LICENSE("GPL v2");
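For context on how the symbols exported above are meant to be consumed, here is a minimal, illustrative sketch of a host-controller glue driver calling cqhci_pltfm_init(), cqhci_init() and cqhci_irq(). The "foo" driver name and its functions are hypothetical placeholders, not part of this file; real integrations (for example the SDHCI-based glue drivers) follow the same pattern but add controller-specific setup.

/* Hypothetical glue-driver sketch, not part of cqhci.c */
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/mmc/host.h>

#include "cqhci.h"

static void foo_cqe_enable(struct mmc_host *mmc)
{
	/* Controller-specific switch into CQE mode would go here. */
}

static const struct cqhci_host_ops foo_cqhci_ops = {
	.enable = foo_cqe_enable,
};

static irqreturn_t foo_cqe_isr(int irq, void *dev_id)
{
	struct mmc_host *mmc = dev_id;

	/* Forward CQE interrupts; cqhci_irq() reads and clears CQHCI_IS. */
	return cqhci_irq(mmc, 0, 0, 0);
}

static int foo_add_cqe(struct platform_device *pdev, struct mmc_host *mmc,
		       bool dma64)
{
	struct cqhci_host *cq_host;

	/* Maps the "cqhci" IORESOURCE_MEM entry declared for the device. */
	cq_host = cqhci_pltfm_init(pdev);
	if (IS_ERR(cq_host))
		return PTR_ERR(cq_host);

	cq_host->ops = &foo_cqhci_ops;
	mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;

	/* Allocates slots and installs cqhci_cqe_ops as mmc->cqe_ops. */
	return cqhci_init(cq_host, mmc, dma64);
}

The descriptor memory itself is only allocated later, in cqhci_host_alloc_tdl(), the first time the MMC core calls .cqe_enable for a card with command queueing enabled.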