Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
cqhci driver source at tag v5.10-rc6 (1165 lines, 29 kB)

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/platform_device.h>
#include <linux/ktime.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

#include "cqhci.h"

#define DCMD_SLOT 31
#define NUM_SLOTS 32

struct cqhci_slot {
        struct mmc_request *mrq;
        unsigned int flags;
#define CQHCI_EXTERNAL_TIMEOUT  BIT(0)
#define CQHCI_COMPLETED         BIT(1)
#define CQHCI_HOST_CRC          BIT(2)
#define CQHCI_HOST_TIMEOUT      BIT(3)
#define CQHCI_HOST_OTHER        BIT(4)
};

static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag)
{
        return cq_host->desc_base + (tag * cq_host->slot_sz);
}

static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag)
{
        u8 *desc = get_desc(cq_host, tag);

        return desc + cq_host->task_desc_len;
}

static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag)
{
        return cq_host->trans_desc_dma_base +
                (cq_host->mmc->max_segs * tag *
                 cq_host->trans_desc_len);
}

static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
        return cq_host->trans_desc_base +
                (cq_host->trans_desc_len * cq_host->mmc->max_segs * tag);
}

static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
        u8 *link_temp;
        dma_addr_t trans_temp;

        link_temp = get_link_desc(cq_host, tag);
        trans_temp = get_trans_desc_dma(cq_host, tag);

        memset(link_temp, 0, cq_host->link_desc_len);
        if (cq_host->link_desc_len > 8)
                *(link_temp + 8) = 0;

        if (tag == DCMD_SLOT && (cq_host->mmc->caps2 & MMC_CAP2_CQE_DCMD)) {
                *link_temp = CQHCI_VALID(0) | CQHCI_ACT(0) | CQHCI_END(1);
                return;
        }

        *link_temp = CQHCI_VALID(1) | CQHCI_ACT(0x6) | CQHCI_END(0);

        if (cq_host->dma64) {
                __le64 *data_addr = (__le64 __force *)(link_temp + 4);

                data_addr[0] = cpu_to_le64(trans_temp);
        } else {
                __le32 *data_addr = (__le32 __force *)(link_temp + 4);

                data_addr[0] = cpu_to_le32(trans_temp);
        }
}

static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
{
        cqhci_writel(cq_host, set, CQHCI_ISTE);
        cqhci_writel(cq_host, set, CQHCI_ISGE);
}
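
/*
 * Interrupts are double-gated: cqhci_set_irqs() mirrors the same mask into
 * both the status-enable (ISTE) and signal-enable (ISGE) registers, so a
 * source only raises an IRQ when it is enabled in both. Passing 0 masks
 * everything, which is used below to quiesce the controller around the
 * halt and clear-all-tasks operations.
 */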

#define DRV_NAME "cqhci"

#define CQHCI_DUMP(f, x...) \
        pr_err("%s: " DRV_NAME ": " f, mmc_hostname(mmc), ## x)

static void cqhci_dumpregs(struct cqhci_host *cq_host)
{
        struct mmc_host *mmc = cq_host->mmc;

        CQHCI_DUMP("============ CQHCI REGISTER DUMP ===========\n");

        CQHCI_DUMP("Caps:      0x%08x | Version:  0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_CAP),
                   cqhci_readl(cq_host, CQHCI_VER));
        CQHCI_DUMP("Config:    0x%08x | Control:  0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_CFG),
                   cqhci_readl(cq_host, CQHCI_CTL));
        CQHCI_DUMP("Int stat:  0x%08x | Int enab: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_IS),
                   cqhci_readl(cq_host, CQHCI_ISTE));
        CQHCI_DUMP("Int sig:   0x%08x | Int Coal: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_ISGE),
                   cqhci_readl(cq_host, CQHCI_IC));
        CQHCI_DUMP("TDL base:  0x%08x | TDL up32: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_TDLBA),
                   cqhci_readl(cq_host, CQHCI_TDLBAU));
        CQHCI_DUMP("Doorbell:  0x%08x | TCN:      0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_TDBR),
                   cqhci_readl(cq_host, CQHCI_TCN));
        CQHCI_DUMP("Dev queue: 0x%08x | Dev Pend: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_DQS),
                   cqhci_readl(cq_host, CQHCI_DPT));
        CQHCI_DUMP("Task clr:  0x%08x | SSC1:     0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_TCLR),
                   cqhci_readl(cq_host, CQHCI_SSC1));
        CQHCI_DUMP("SSC2:      0x%08x | DCMD rsp: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_SSC2),
                   cqhci_readl(cq_host, CQHCI_CRDCT));
        CQHCI_DUMP("RED mask:  0x%08x | TERRI:    0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_RMEM),
                   cqhci_readl(cq_host, CQHCI_TERRI));
        CQHCI_DUMP("Resp idx:  0x%08x | Resp arg: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_CRI),
                   cqhci_readl(cq_host, CQHCI_CRA));

        if (cq_host->ops->dumpregs)
                cq_host->ops->dumpregs(mmc);
        else
                CQHCI_DUMP(": ===========================================\n");
}

/*
 * The allocated descriptor table for task, link & transfer descriptors
 * looks like:
 * |----------|
 * |task desc |  |->|----------|
 * |----------|  |  |trans desc|
 * |link desc-|->|  |----------|
 * |----------|          .
 *      .                .
 *  no. of slots      max-segs
 *      .           |----------|
 * |----------|
 * The idea here is to create the [task+trans] table and mark & point the
 * link desc to the transfer desc table on a per slot basis.
 */
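
/*
 * Worked example of the sizing done in cqhci_host_alloc_tdl() below, for a
 * 64-bit DMA host without CQHCI_TASK_DESC_SZ_128 and with, say, max_segs =
 * 128 and cqe_qdepth = 32 (illustrative values, not taken from this file):
 *
 *   task_desc_len  = 8,  link_desc_len = 16  ->  slot_sz = 24 bytes
 *   desc_size      = 24 * 32 slots           =  768 bytes
 *   trans_desc_len = 16
 *   data_size      = 16 * 128 segs * 32 tags = 64 KiB
 *
 * Each slot's link descriptor is then pointed at its private stretch of
 * max_segs transfer descriptors by setup_trans_desc().
 */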

static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
{
        int i = 0;

        /* task descriptor can be 64/128 bit irrespective of arch */
        if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
                cqhci_writel(cq_host, cqhci_readl(cq_host, CQHCI_CFG) |
                             CQHCI_TASK_DESC_SZ, CQHCI_CFG);
                cq_host->task_desc_len = 16;
        } else {
                cq_host->task_desc_len = 8;
        }

        /*
         * 96 bits length of transfer desc instead of 128 bits which means
         * ADMA would expect next valid descriptor at the 96th bit
         * or 128th bit
         */
        if (cq_host->dma64) {
                if (cq_host->quirks & CQHCI_QUIRK_SHORT_TXFR_DESC_SZ)
                        cq_host->trans_desc_len = 12;
                else
                        cq_host->trans_desc_len = 16;
                cq_host->link_desc_len = 16;
        } else {
                cq_host->trans_desc_len = 8;
                cq_host->link_desc_len = 8;
        }

        /* total size of a slot: 1 task & 1 transfer (link) */
        cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;

        cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;

        cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
                cq_host->mmc->cqe_qdepth;

        pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
                 mmc_hostname(cq_host->mmc), cq_host->desc_size,
                 cq_host->data_size, cq_host->slot_sz);

        /*
         * allocate a dma-mapped chunk of memory for the descriptors
         * allocate a dma-mapped chunk of memory for link descriptors
         * setup each link-desc memory offset per slot-number to
         * the descriptor table.
         */
        cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
                                                 cq_host->desc_size,
                                                 &cq_host->desc_dma_base,
                                                 GFP_KERNEL);
        if (!cq_host->desc_base)
                return -ENOMEM;

        cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
                                                 cq_host->data_size,
                                                 &cq_host->trans_desc_dma_base,
                                                 GFP_KERNEL);
        if (!cq_host->trans_desc_base) {
                dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size,
                                   cq_host->desc_base,
                                   cq_host->desc_dma_base);
                cq_host->desc_base = NULL;
                cq_host->desc_dma_base = 0;
                return -ENOMEM;
        }

        pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
                 mmc_hostname(cq_host->mmc), cq_host->desc_base,
                 cq_host->trans_desc_base,
                 (unsigned long long)cq_host->desc_dma_base,
                 (unsigned long long)cq_host->trans_desc_dma_base);

        for (; i < (cq_host->num_slots); i++)
                setup_trans_desc(cq_host, i);

        return 0;
}
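
/*
 * __cqhci_enable() below follows the rule stated in its body: CQCFG must not
 * be changed while the controller is enabled, so CQHCI_ENABLE is dropped
 * first, the descriptor size / DCMD bits and the task-descriptor-list base
 * address (TDLBA/TDLBAU) are programmed, and only then is CQHCI_ENABLE set
 * again and the interrupt mask opened.
 */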

static void __cqhci_enable(struct cqhci_host *cq_host)
{
        struct mmc_host *mmc = cq_host->mmc;
        u32 cqcfg;

        cqcfg = cqhci_readl(cq_host, CQHCI_CFG);

        /* Configuration must not be changed while enabled */
        if (cqcfg & CQHCI_ENABLE) {
                cqcfg &= ~CQHCI_ENABLE;
                cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
        }

        cqcfg &= ~(CQHCI_DCMD | CQHCI_TASK_DESC_SZ);

        if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
                cqcfg |= CQHCI_DCMD;

        if (cq_host->caps & CQHCI_TASK_DESC_SZ_128)
                cqcfg |= CQHCI_TASK_DESC_SZ;

        cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

        cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base),
                     CQHCI_TDLBA);
        cqhci_writel(cq_host, upper_32_bits(cq_host->desc_dma_base),
                     CQHCI_TDLBAU);

        cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2);

        cqhci_set_irqs(cq_host, 0);

        cqcfg |= CQHCI_ENABLE;

        cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

        mmc->cqe_on = true;

        if (cq_host->ops->enable)
                cq_host->ops->enable(mmc);

        /* Ensure all writes are done before interrupts are enabled */
        wmb();

        cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

        cq_host->activated = true;
}

static void __cqhci_disable(struct cqhci_host *cq_host)
{
        u32 cqcfg;

        cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
        cqcfg &= ~CQHCI_ENABLE;
        cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

        cq_host->mmc->cqe_on = false;

        cq_host->activated = false;
}

int cqhci_deactivate(struct mmc_host *mmc)
{
        struct cqhci_host *cq_host = mmc->cqe_private;

        if (cq_host->enabled && cq_host->activated)
                __cqhci_disable(cq_host);

        return 0;
}
EXPORT_SYMBOL(cqhci_deactivate);

int cqhci_resume(struct mmc_host *mmc)
{
        /* Re-enable is done upon first request */
        return 0;
}
EXPORT_SYMBOL(cqhci_resume);

static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        int err;

        if (!card->ext_csd.cmdq_en)
                return -EINVAL;

        if (cq_host->enabled)
                return 0;

        cq_host->rca = card->rca;

        err = cqhci_host_alloc_tdl(cq_host);
        if (err) {
                pr_err("%s: Failed to enable CQE, error %d\n",
                       mmc_hostname(mmc), err);
                return err;
        }

        __cqhci_enable(cq_host);

        cq_host->enabled = true;

#ifdef DEBUG
        cqhci_dumpregs(cq_host);
#endif
        return 0;
}

/* CQHCI is idle and should halt immediately, so set a small timeout */
#define CQHCI_OFF_TIMEOUT 100

static u32 cqhci_read_ctl(struct cqhci_host *cq_host)
{
        return cqhci_readl(cq_host, CQHCI_CTL);
}

static void cqhci_off(struct mmc_host *mmc)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        u32 reg;
        int err;

        if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
                return;

        if (cq_host->ops->disable)
                cq_host->ops->disable(mmc, false);

        cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);

        err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg,
                                 reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT);
        if (err < 0)
                pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
        else
                pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));

        if (cq_host->ops->post_disable)
                cq_host->ops->post_disable(mmc);

        mmc->cqe_on = false;
}

static void cqhci_disable(struct mmc_host *mmc)
{
        struct cqhci_host *cq_host = mmc->cqe_private;

        if (!cq_host->enabled)
                return;

        cqhci_off(mmc);

        __cqhci_disable(cq_host);

        dmam_free_coherent(mmc_dev(mmc), cq_host->data_size,
                           cq_host->trans_desc_base,
                           cq_host->trans_desc_dma_base);

        dmam_free_coherent(mmc_dev(mmc), cq_host->desc_size,
                           cq_host->desc_base,
                           cq_host->desc_dma_base);

        cq_host->trans_desc_base = NULL;
        cq_host->desc_base = NULL;

        cq_host->enabled = false;
}

static void cqhci_prep_task_desc(struct mmc_request *mrq,
                                 u64 *data, bool intr)
{
        u32 req_flags = mrq->data->flags;

        *data = CQHCI_VALID(1) |
                CQHCI_END(1) |
                CQHCI_INT(intr) |
                CQHCI_ACT(0x5) |
                CQHCI_FORCED_PROG(!!(req_flags & MMC_DATA_FORCED_PRG)) |
                CQHCI_DATA_TAG(!!(req_flags & MMC_DATA_DAT_TAG)) |
                CQHCI_DATA_DIR(!!(req_flags & MMC_DATA_READ)) |
                CQHCI_PRIORITY(!!(req_flags & MMC_DATA_PRIO)) |
                CQHCI_QBAR(!!(req_flags & MMC_DATA_QBR)) |
                CQHCI_REL_WRITE(!!(req_flags & MMC_DATA_REL_WR)) |
                CQHCI_BLK_COUNT(mrq->data->blocks) |
                CQHCI_BLK_ADDR((u64)mrq->data->blk_addr);

        pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx\n",
                 mmc_hostname(mrq->host), mrq->tag, (unsigned long long)*data);
}
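
/*
 * The task descriptor assembled above encodes one queued data transfer:
 * ACT = 0x5 marks it as a task descriptor, the DIR/PRIO/QBAR/REL_WRITE
 * bits are taken from the request flags, and BLK_COUNT/BLK_ADDR give the
 * transfer size and card address. The exact bit positions live in the
 * CQHCI_* field macros in cqhci.h.
 */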

static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq)
{
        int sg_count;
        struct mmc_data *data = mrq->data;

        if (!data)
                return -EINVAL;

        sg_count = dma_map_sg(mmc_dev(host), data->sg,
                              data->sg_len,
                              (data->flags & MMC_DATA_WRITE) ?
                              DMA_TO_DEVICE : DMA_FROM_DEVICE);
        if (!sg_count) {
                pr_err("%s: sg-len: %d\n", __func__, data->sg_len);
                return -ENOMEM;
        }

        return sg_count;
}

static void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
                                bool dma64)
{
        __le32 *attr = (__le32 __force *)desc;

        *attr = (CQHCI_VALID(1) |
                 CQHCI_END(end ? 1 : 0) |
                 CQHCI_INT(0) |
                 CQHCI_ACT(0x4) |
                 CQHCI_DAT_LENGTH(len));

        if (dma64) {
                __le64 *dataddr = (__le64 __force *)(desc + 4);

                dataddr[0] = cpu_to_le64(addr);
        } else {
                __le32 *dataddr = (__le32 __force *)(desc + 4);

                dataddr[0] = cpu_to_le32(addr);
        }
}

static int cqhci_prep_tran_desc(struct mmc_request *mrq,
                                struct cqhci_host *cq_host, int tag)
{
        struct mmc_data *data = mrq->data;
        int i, sg_count, len;
        bool end = false;
        bool dma64 = cq_host->dma64;
        dma_addr_t addr;
        u8 *desc;
        struct scatterlist *sg;

        sg_count = cqhci_dma_map(mrq->host, mrq);
        if (sg_count < 0) {
                pr_err("%s: %s: unable to map sg lists, %d\n",
                       mmc_hostname(mrq->host), __func__, sg_count);
                return sg_count;
        }

        desc = get_trans_desc(cq_host, tag);

        for_each_sg(data->sg, sg, sg_count, i) {
                addr = sg_dma_address(sg);
                len = sg_dma_len(sg);

                if ((i+1) == sg_count)
                        end = true;
                cqhci_set_tran_desc(desc, addr, len, end, dma64);
                desc += cq_host->trans_desc_len;
        }

        return 0;
}

static void cqhci_prep_dcmd_desc(struct mmc_host *mmc,
                                 struct mmc_request *mrq)
{
        u64 *task_desc = NULL;
        u64 data = 0;
        u8 resp_type;
        u8 *desc;
        __le64 *dataddr;
        struct cqhci_host *cq_host = mmc->cqe_private;
        u8 timing;

        if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) {
                resp_type = 0x0;
                timing = 0x1;
        } else {
                if (mrq->cmd->flags & MMC_RSP_R1B) {
                        resp_type = 0x3;
                        timing = 0x0;
                } else {
                        resp_type = 0x2;
                        timing = 0x1;
                }
        }

        task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot);
        memset(task_desc, 0, cq_host->task_desc_len);
        data |= (CQHCI_VALID(1) |
                 CQHCI_END(1) |
                 CQHCI_INT(1) |
                 CQHCI_QBAR(1) |
                 CQHCI_ACT(0x5) |
                 CQHCI_CMD_INDEX(mrq->cmd->opcode) |
                 CQHCI_CMD_TIMING(timing) | CQHCI_RESP_TYPE(resp_type));
        if (cq_host->ops->update_dcmd_desc)
                cq_host->ops->update_dcmd_desc(mmc, mrq, &data);
        *task_desc |= data;
        desc = (u8 *)task_desc;
        pr_debug("%s: cqhci: dcmd: cmd: %d timing: %d resp: %d\n",
                 mmc_hostname(mmc), mrq->cmd->opcode, timing, resp_type);
        dataddr = (__le64 __force *)(desc + 4);
        dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg);
}

static void cqhci_post_req(struct mmc_host *host, struct mmc_request *mrq)
{
        struct mmc_data *data = mrq->data;

        if (data) {
                dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len,
                             (data->flags & MMC_DATA_READ) ?
                             DMA_FROM_DEVICE : DMA_TO_DEVICE);
        }
}
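
/*
 * DCMD (direct command) requests carry no data: cqhci_prep_dcmd_desc()
 * above builds the descriptor in the dedicated slot instead (DCMD_SLOT,
 * tag 31), encoding opcode, response type and command timing, with the
 * command argument written at byte offset 4 of the descriptor.
 * cqhci_tag() below relies on this split: a request with a bare command
 * maps to DCMD_SLOT, anything else uses its block-layer tag.
 */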

static inline int cqhci_tag(struct mmc_request *mrq)
{
        return mrq->cmd ? DCMD_SLOT : mrq->tag;
}

static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        int err = 0;
        u64 data = 0;
        u64 *task_desc = NULL;
        int tag = cqhci_tag(mrq);
        struct cqhci_host *cq_host = mmc->cqe_private;
        unsigned long flags;

        if (!cq_host->enabled) {
                pr_err("%s: cqhci: not enabled\n", mmc_hostname(mmc));
                return -EINVAL;
        }

        /* First request after resume has to re-enable */
        if (!cq_host->activated)
                __cqhci_enable(cq_host);

        if (!mmc->cqe_on) {
                if (cq_host->ops->pre_enable)
                        cq_host->ops->pre_enable(mmc);

                cqhci_writel(cq_host, 0, CQHCI_CTL);
                mmc->cqe_on = true;
                pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
                if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
                        pr_err("%s: cqhci: CQE failed to exit halt state\n",
                               mmc_hostname(mmc));
                }
                if (cq_host->ops->enable)
                        cq_host->ops->enable(mmc);
        }

        if (mrq->data) {
                task_desc = (__le64 __force *)get_desc(cq_host, tag);
                cqhci_prep_task_desc(mrq, &data, 1);
                *task_desc = cpu_to_le64(data);
                err = cqhci_prep_tran_desc(mrq, cq_host, tag);
                if (err) {
                        pr_err("%s: cqhci: failed to setup tx desc: %d\n",
                               mmc_hostname(mmc), err);
                        return err;
                }
        } else {
                cqhci_prep_dcmd_desc(mmc, mrq);
        }

        spin_lock_irqsave(&cq_host->lock, flags);

        if (cq_host->recovery_halt) {
                err = -EBUSY;
                goto out_unlock;
        }

        cq_host->slot[tag].mrq = mrq;
        cq_host->slot[tag].flags = 0;

        cq_host->qcnt += 1;
        /* Make sure descriptors are ready before ringing the doorbell */
        wmb();
        cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
        if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
                pr_debug("%s: cqhci: doorbell not set for tag %d\n",
                         mmc_hostname(mmc), tag);
out_unlock:
        spin_unlock_irqrestore(&cq_host->lock, flags);

        if (err)
                cqhci_post_req(mmc, mrq);

        return err;
}

static void cqhci_recovery_needed(struct mmc_host *mmc, struct mmc_request *mrq,
                                  bool notify)
{
        struct cqhci_host *cq_host = mmc->cqe_private;

        if (!cq_host->recovery_halt) {
                cq_host->recovery_halt = true;
                pr_debug("%s: cqhci: recovery needed\n", mmc_hostname(mmc));
                wake_up(&cq_host->wait_queue);
                if (notify && mrq->recovery_notifier)
                        mrq->recovery_notifier(mrq);
        }
}

static unsigned int cqhci_error_flags(int error1, int error2)
{
        int error = error1 ? error1 : error2;

        switch (error) {
        case -EILSEQ:
                return CQHCI_HOST_CRC;
        case -ETIMEDOUT:
                return CQHCI_HOST_TIMEOUT;
        default:
                return CQHCI_HOST_OTHER;
        }
}
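
/*
 * On an error interrupt, the task error information register (TERRI) can
 * identify the failing command and/or data task. cqhci_error_irq() below
 * marks those slots and triggers recovery; if TERRI pins down nothing, one
 * outstanding task is marked anyway so that recovery makes forward
 * progress. cqhci_error_flags() simply prefers whichever error code the
 * caller passes first (the command error for the command task, the data
 * error for the data task).
 */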

static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error,
                            int data_error)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        struct cqhci_slot *slot;
        u32 terri;
        int tag;

        spin_lock(&cq_host->lock);

        terri = cqhci_readl(cq_host, CQHCI_TERRI);

        pr_debug("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
                 mmc_hostname(mmc), status, cmd_error, data_error, terri);

        /* Forget about errors when recovery has already been triggered */
        if (cq_host->recovery_halt)
                goto out_unlock;

        if (!cq_host->qcnt) {
                WARN_ONCE(1, "%s: cqhci: error when idle. IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
                          mmc_hostname(mmc), status, cmd_error, data_error,
                          terri);
                goto out_unlock;
        }

        if (CQHCI_TERRI_C_VALID(terri)) {
                tag = CQHCI_TERRI_C_TASK(terri);
                slot = &cq_host->slot[tag];
                if (slot->mrq) {
                        slot->flags = cqhci_error_flags(cmd_error, data_error);
                        cqhci_recovery_needed(mmc, slot->mrq, true);
                }
        }

        if (CQHCI_TERRI_D_VALID(terri)) {
                tag = CQHCI_TERRI_D_TASK(terri);
                slot = &cq_host->slot[tag];
                if (slot->mrq) {
                        slot->flags = cqhci_error_flags(data_error, cmd_error);
                        cqhci_recovery_needed(mmc, slot->mrq, true);
                }
        }

        if (!cq_host->recovery_halt) {
                /*
                 * The only way to guarantee forward progress is to mark at
                 * least one task in error, so if none is indicated, pick one.
                 */
                for (tag = 0; tag < NUM_SLOTS; tag++) {
                        slot = &cq_host->slot[tag];
                        if (!slot->mrq)
                                continue;
                        slot->flags = cqhci_error_flags(data_error, cmd_error);
                        cqhci_recovery_needed(mmc, slot->mrq, true);
                        break;
                }
        }

out_unlock:
        spin_unlock(&cq_host->lock);
}

static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        struct cqhci_slot *slot = &cq_host->slot[tag];
        struct mmc_request *mrq = slot->mrq;
        struct mmc_data *data;

        if (!mrq) {
                WARN_ONCE(1, "%s: cqhci: spurious TCN for tag %d\n",
                          mmc_hostname(mmc), tag);
                return;
        }

        /* No completions allowed during recovery */
        if (cq_host->recovery_halt) {
                slot->flags |= CQHCI_COMPLETED;
                return;
        }

        slot->mrq = NULL;

        cq_host->qcnt -= 1;

        data = mrq->data;
        if (data) {
                if (data->error)
                        data->bytes_xfered = 0;
                else
                        data->bytes_xfered = data->blksz * data->blocks;
        }

        mmc_cqe_request_done(mmc, mrq);
}

irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
                      int data_error)
{
        u32 status;
        unsigned long tag = 0, comp_status;
        struct cqhci_host *cq_host = mmc->cqe_private;

        status = cqhci_readl(cq_host, CQHCI_IS);
        cqhci_writel(cq_host, status, CQHCI_IS);

        pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status);

        if ((status & CQHCI_IS_RED) || cmd_error || data_error)
                cqhci_error_irq(mmc, status, cmd_error, data_error);

        if (status & CQHCI_IS_TCC) {
                /* read TCN and complete the request */
                comp_status = cqhci_readl(cq_host, CQHCI_TCN);
                cqhci_writel(cq_host, comp_status, CQHCI_TCN);
                pr_debug("%s: cqhci: TCN: 0x%08lx\n",
                         mmc_hostname(mmc), comp_status);

                spin_lock(&cq_host->lock);

                for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
                        /* complete the corresponding mrq */
                        pr_debug("%s: cqhci: completing tag %lu\n",
                                 mmc_hostname(mmc), tag);
                        cqhci_finish_mrq(mmc, tag);
                }

                if (cq_host->waiting_for_idle && !cq_host->qcnt) {
                        cq_host->waiting_for_idle = false;
                        wake_up(&cq_host->wait_queue);
                }

                spin_unlock(&cq_host->lock);
        }

        if (status & CQHCI_IS_TCL)
                wake_up(&cq_host->wait_queue);

        if (status & CQHCI_IS_HAC)
                wake_up(&cq_host->wait_queue);

        return IRQ_HANDLED;
}
EXPORT_SYMBOL(cqhci_irq);
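
/*
 * cqe_wait_for_idle support: cqhci_is_idle() samples qcnt under the lock
 * and records whether someone is waiting; the IRQ handler above wakes the
 * wait queue once the last task completes (or when recovery halts the
 * queue, in which case -EBUSY is returned to the caller).
 */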

static bool cqhci_is_idle(struct cqhci_host *cq_host, int *ret)
{
        unsigned long flags;
        bool is_idle;

        spin_lock_irqsave(&cq_host->lock, flags);
        is_idle = !cq_host->qcnt || cq_host->recovery_halt;
        *ret = cq_host->recovery_halt ? -EBUSY : 0;
        cq_host->waiting_for_idle = !is_idle;
        spin_unlock_irqrestore(&cq_host->lock, flags);

        return is_idle;
}

static int cqhci_wait_for_idle(struct mmc_host *mmc)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        int ret;

        wait_event(cq_host->wait_queue, cqhci_is_idle(cq_host, &ret));

        return ret;
}

static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq,
                          bool *recovery_needed)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        int tag = cqhci_tag(mrq);
        struct cqhci_slot *slot = &cq_host->slot[tag];
        unsigned long flags;
        bool timed_out;

        spin_lock_irqsave(&cq_host->lock, flags);
        timed_out = slot->mrq == mrq;
        if (timed_out) {
                slot->flags |= CQHCI_EXTERNAL_TIMEOUT;
                cqhci_recovery_needed(mmc, mrq, false);
                *recovery_needed = cq_host->recovery_halt;
        }
        spin_unlock_irqrestore(&cq_host->lock, flags);

        if (timed_out) {
                pr_err("%s: cqhci: timeout for tag %d\n",
                       mmc_hostname(mmc), tag);
                cqhci_dumpregs(cq_host);
        }

        return timed_out;
}

static bool cqhci_tasks_cleared(struct cqhci_host *cq_host)
{
        return !(cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_CLEAR_ALL_TASKS);
}

static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        bool ret;
        u32 ctl;

        cqhci_set_irqs(cq_host, CQHCI_IS_TCL);

        ctl = cqhci_readl(cq_host, CQHCI_CTL);
        ctl |= CQHCI_CLEAR_ALL_TASKS;
        cqhci_writel(cq_host, ctl, CQHCI_CTL);

        wait_event_timeout(cq_host->wait_queue, cqhci_tasks_cleared(cq_host),
                           msecs_to_jiffies(timeout) + 1);

        cqhci_set_irqs(cq_host, 0);

        ret = cqhci_tasks_cleared(cq_host);

        if (!ret)
                pr_debug("%s: cqhci: Failed to clear tasks\n",
                         mmc_hostname(mmc));

        return ret;
}

static bool cqhci_halted(struct cqhci_host *cq_host)
{
        return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT;
}

static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        bool ret;
        u32 ctl;

        if (cqhci_halted(cq_host))
                return true;

        cqhci_set_irqs(cq_host, CQHCI_IS_HAC);

        ctl = cqhci_readl(cq_host, CQHCI_CTL);
        ctl |= CQHCI_HALT;
        cqhci_writel(cq_host, ctl, CQHCI_CTL);

        wait_event_timeout(cq_host->wait_queue, cqhci_halted(cq_host),
                           msecs_to_jiffies(timeout) + 1);

        cqhci_set_irqs(cq_host, 0);

        ret = cqhci_halted(cq_host);

        if (!ret)
                pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));

        return ret;
}
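
/*
 * Both cqhci_halt() and cqhci_clear_all_tasks() follow the same pattern:
 * enable only the matching interrupt (HAC or TCL), set the control bit,
 * sleep on the wait queue until the controller acknowledges or the timeout
 * expires, then mask interrupts again and re-read the register to decide
 * success.
 */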

/*
 * After halting we expect to be able to use the command line. We interpret the
 * failure to halt to mean the data lines might still be in use (and the upper
 * layers will need to send a STOP command), so we set the timeout based on a
 * generous command timeout.
 */
#define CQHCI_START_HALT_TIMEOUT        5

static void cqhci_recovery_start(struct mmc_host *mmc)
{
        struct cqhci_host *cq_host = mmc->cqe_private;

        pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

        WARN_ON(!cq_host->recovery_halt);

        cqhci_halt(mmc, CQHCI_START_HALT_TIMEOUT);

        if (cq_host->ops->disable)
                cq_host->ops->disable(mmc, true);

        mmc->cqe_on = false;
}

static int cqhci_error_from_flags(unsigned int flags)
{
        if (!flags)
                return 0;

        /* CRC errors might indicate re-tuning so prefer to report that */
        if (flags & CQHCI_HOST_CRC)
                return -EILSEQ;

        if (flags & (CQHCI_EXTERNAL_TIMEOUT | CQHCI_HOST_TIMEOUT))
                return -ETIMEDOUT;

        return -EIO;
}

static void cqhci_recover_mrq(struct cqhci_host *cq_host, unsigned int tag)
{
        struct cqhci_slot *slot = &cq_host->slot[tag];
        struct mmc_request *mrq = slot->mrq;
        struct mmc_data *data;

        if (!mrq)
                return;

        slot->mrq = NULL;

        cq_host->qcnt -= 1;

        data = mrq->data;
        if (data) {
                data->bytes_xfered = 0;
                data->error = cqhci_error_from_flags(slot->flags);
        } else {
                mrq->cmd->error = cqhci_error_from_flags(slot->flags);
        }

        mmc_cqe_request_done(cq_host->mmc, mrq);
}

static void cqhci_recover_mrqs(struct cqhci_host *cq_host)
{
        int i;

        for (i = 0; i < cq_host->num_slots; i++)
                cqhci_recover_mrq(cq_host, i);
}

/*
 * By now the command and data lines should be unused so there is no reason for
 * CQHCI to take a long time to halt, but if it doesn't halt there could be
 * problems clearing tasks, so be generous.
 */
#define CQHCI_FINISH_HALT_TIMEOUT       20

/* CQHCI could be expected to clear its internal state pretty quickly */
#define CQHCI_CLEAR_TIMEOUT             20
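
/*
 * Recovery runs in two phases driven by the mmc core: cqe_recovery_start
 * (above) halts the controller while requests are still outstanding, the
 * core then recovers the card itself, and cqe_recovery_finish (below)
 * clears all tasks, fails the outstanding mrqs back to the block layer and
 * re-arms the interrupt mask.
 */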

static void cqhci_recovery_finish(struct mmc_host *mmc)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        unsigned long flags;
        u32 cqcfg;
        bool ok;

        pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

        WARN_ON(!cq_host->recovery_halt);

        ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);

        if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
                ok = false;

        /*
         * The specification contradicts itself, by saying that tasks cannot be
         * cleared if CQHCI does not halt, but if CQHCI does not halt, it should
         * be disabled/re-enabled, but not to disable before clearing tasks.
         * Have a go anyway.
         */
        if (!ok) {
                pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
                cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
                cqcfg &= ~CQHCI_ENABLE;
                cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
                cqcfg |= CQHCI_ENABLE;
                cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
                /* Be sure that there are no tasks */
                ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
                if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
                        ok = false;
                WARN_ON(!ok);
        }

        cqhci_recover_mrqs(cq_host);

        WARN_ON(cq_host->qcnt);

        spin_lock_irqsave(&cq_host->lock, flags);
        cq_host->qcnt = 0;
        cq_host->recovery_halt = false;
        mmc->cqe_on = false;
        spin_unlock_irqrestore(&cq_host->lock, flags);

        /* Ensure all writes are done before interrupts are re-enabled */
        wmb();

        cqhci_writel(cq_host, CQHCI_IS_HAC | CQHCI_IS_TCL, CQHCI_IS);

        cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

        pr_debug("%s: cqhci: recovery done\n", mmc_hostname(mmc));
}

static const struct mmc_cqe_ops cqhci_cqe_ops = {
        .cqe_enable = cqhci_enable,
        .cqe_disable = cqhci_disable,
        .cqe_request = cqhci_request,
        .cqe_post_req = cqhci_post_req,
        .cqe_off = cqhci_off,
        .cqe_wait_for_idle = cqhci_wait_for_idle,
        .cqe_timeout = cqhci_timeout,
        .cqe_recovery_start = cqhci_recovery_start,
        .cqe_recovery_finish = cqhci_recovery_finish,
};

struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev)
{
        struct cqhci_host *cq_host;
        struct resource *cqhci_memres = NULL;

        /* check and setup CMDQ interface */
        cqhci_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                                    "cqhci");
        if (!cqhci_memres) {
                dev_dbg(&pdev->dev, "CMDQ not supported\n");
                return ERR_PTR(-EINVAL);
        }

        cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
        if (!cq_host)
                return ERR_PTR(-ENOMEM);
        cq_host->mmio = devm_ioremap(&pdev->dev,
                                     cqhci_memres->start,
                                     resource_size(cqhci_memres));
        if (!cq_host->mmio) {
                dev_err(&pdev->dev, "failed to remap cqhci regs\n");
                return ERR_PTR(-EBUSY);
        }
        dev_dbg(&pdev->dev, "CMDQ ioremap: done\n");

        return cq_host;
}
EXPORT_SYMBOL(cqhci_pltfm_init);
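
/*
 * A minimal sketch of how a host driver is expected to wire this up (the
 * ops-structure name below is illustrative, not from this file): obtain
 * the "cqhci" MMIO resource via cqhci_pltfm_init(), set cq_host->ops and
 * any caps/quirks, then call cqhci_init().  For example:
 *
 *      cq_host = cqhci_pltfm_init(pdev);
 *      if (IS_ERR(cq_host))
 *              return PTR_ERR(cq_host);
 *      cq_host->ops = &my_cqhci_host_ops;   // illustrative name
 *      err = cqhci_init(cq_host, mmc, dma64);
 *
 * The host's interrupt handler is then expected to forward CQE interrupts
 * to cqhci_irq() along with any translated command/data error codes.
 */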

static unsigned int cqhci_ver_major(struct cqhci_host *cq_host)
{
        return CQHCI_VER_MAJOR(cqhci_readl(cq_host, CQHCI_VER));
}

static unsigned int cqhci_ver_minor(struct cqhci_host *cq_host)
{
        u32 ver = cqhci_readl(cq_host, CQHCI_VER);

        return CQHCI_VER_MINOR1(ver) * 10 + CQHCI_VER_MINOR2(ver);
}

int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
               bool dma64)
{
        int err;

        cq_host->dma64 = dma64;
        cq_host->mmc = mmc;
        cq_host->mmc->cqe_private = cq_host;

        cq_host->num_slots = NUM_SLOTS;
        cq_host->dcmd_slot = DCMD_SLOT;

        mmc->cqe_ops = &cqhci_cqe_ops;

        mmc->cqe_qdepth = NUM_SLOTS;
        if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
                mmc->cqe_qdepth -= 1;

        cq_host->slot = devm_kcalloc(mmc_dev(mmc), cq_host->num_slots,
                                     sizeof(*cq_host->slot), GFP_KERNEL);
        if (!cq_host->slot) {
                err = -ENOMEM;
                goto out_err;
        }

        spin_lock_init(&cq_host->lock);

        init_completion(&cq_host->halt_comp);
        init_waitqueue_head(&cq_host->wait_queue);

        pr_info("%s: CQHCI version %u.%02u\n",
                mmc_hostname(mmc), cqhci_ver_major(cq_host),
                cqhci_ver_minor(cq_host));

        return 0;

out_err:
        pr_err("%s: CQHCI version %u.%02u failed to initialize, error %d\n",
               mmc_hostname(mmc), cqhci_ver_major(cq_host),
               cqhci_ver_minor(cq_host), err);
        return err;
}
EXPORT_SYMBOL(cqhci_init);

MODULE_AUTHOR("Venkat Gopalakrishnan <venkatg@codeaurora.org>");
MODULE_DESCRIPTION("Command Queue Host Controller Interface driver");
MODULE_LICENSE("GPL v2");