Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/mmc/host/tmio_mmc_pio.c at v4.3 (1277 lines, 33 kB)
/*
 * linux/drivers/mmc/host/tmio_mmc_pio.c
 *
 * Copyright (C) 2011 Guennadi Liakhovetski
 * Copyright (C) 2007 Ian Molton
 * Copyright (C) 2004 Ian Molton
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Driver for the MMC / SD / SDIO IP found in:
 *
 * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
 *
 * This driver draws mainly on scattered spec sheets, reverse engineering
 * of the Toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
 * support). (Further 4 bit support from a later datasheet.)
 *
 * TODO:
 *   Investigate using a workqueue for PIO transfers
 *   Eliminate FIXMEs
 *   SDIO support
 *   Better power management
 *   Handle MMC errors better
 *   Double buffer support
 *
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/mmc/tmio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/mmc/sdio.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "tmio_mmc.h"

void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
        host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ);
        sd_ctrl_write32(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}

void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
        host->sdcard_irq_mask |= (i & TMIO_MASK_IRQ);
        sd_ctrl_write32(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}

static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
        sd_ctrl_write32(host, CTL_STATUS, ~i);
}

static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
{
        host->sg_len = data->sg_len;
        host->sg_ptr = data->sg;
        host->sg_orig = data->sg;
        host->sg_off = 0;
}

static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
{
        host->sg_ptr = sg_next(host->sg_ptr);
        host->sg_off = 0;
        return --host->sg_len;
}

#define CMDREQ_TIMEOUT  5000

#ifdef CONFIG_MMC_DEBUG

#define STATUS_TO_TEXT(a, status, i) \
        do { \
                if (status & TMIO_STAT_##a) { \
                        if (i++) \
                                printk(" | "); \
                        printk(#a); \
                } \
        } while (0)

static void pr_debug_status(u32 status)
{
        int i = 0;
        pr_debug("status: %08x = ", status);
        STATUS_TO_TEXT(CARD_REMOVE, status, i);
        STATUS_TO_TEXT(CARD_INSERT, status, i);
        STATUS_TO_TEXT(SIGSTATE, status, i);
        STATUS_TO_TEXT(WRPROTECT, status, i);
        STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
        STATUS_TO_TEXT(CARD_INSERT_A, status, i);
        STATUS_TO_TEXT(SIGSTATE_A, status, i);
        STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
        STATUS_TO_TEXT(STOPBIT_ERR, status, i);
        STATUS_TO_TEXT(ILL_FUNC, status, i);
        STATUS_TO_TEXT(CMD_BUSY, status, i);
        STATUS_TO_TEXT(CMDRESPEND, status, i);
        STATUS_TO_TEXT(DATAEND, status, i);
        STATUS_TO_TEXT(CRCFAIL, status, i);
        STATUS_TO_TEXT(DATATIMEOUT, status, i);
        STATUS_TO_TEXT(CMDTIMEOUT, status, i);
        STATUS_TO_TEXT(RXOVERFLOW, status, i);
        STATUS_TO_TEXT(TXUNDERRUN, status, i);
        STATUS_TO_TEXT(RXRDY, status, i);
        STATUS_TO_TEXT(TXRQ, status, i);
        STATUS_TO_TEXT(ILL_ACCESS, status, i);
        printk("\n");
}

#else
#define pr_debug_status(s)  do { } while (0)
#endif
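/*
 * Illustrative note (example only; it adds nothing beyond what the macro
 * above implements): with CONFIG_MMC_DEBUG enabled, a status word with,
 * say, only the CARD_INSERT and SIGSTATE bits set would be printed as
 *
 *      status: <raw hex value> = CARD_INSERT | SIGSTATE
 *
 * i.e. one name per set bit, separated by " | ".
 */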
static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
        struct tmio_mmc_host *host = mmc_priv(mmc);

        if (enable && !host->sdio_irq_enabled) {
                /* Keep device active while SDIO irq is enabled */
                pm_runtime_get_sync(mmc_dev(mmc));
                host->sdio_irq_enabled = true;

                host->sdio_irq_mask = TMIO_SDIO_MASK_ALL &
                                        ~TMIO_SDIO_STAT_IOIRQ;
                sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
                sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
        } else if (!enable && host->sdio_irq_enabled) {
                host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
                sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
                sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);

                host->sdio_irq_enabled = false;
                pm_runtime_mark_last_busy(mmc_dev(mmc));
                pm_runtime_put_autosuspend(mmc_dev(mmc));
        }
}

static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
                               unsigned int new_clock)
{
        u32 clk = 0, clock;

        if (new_clock) {
                for (clock = host->mmc->f_min, clk = 0x80000080;
                     new_clock >= (clock << 1); clk >>= 1)
                        clock <<= 1;

                /* 1/1 clock is an option */
                if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) &&
                    ((clk >> 22) & 0x1))
                        clk |= 0xff;
        }

        if (host->set_clk_div)
                host->set_clk_div(host->pdev, (clk >> 22) & 1);

        sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
        msleep(10);
}
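/*
 * Worked example for the divider search above (illustrative; actual
 * frequencies depend on the platform clock): with f_min = f_max / 512,
 * the loop starts at the /512 divider (low byte 0x80, with a marker in
 * bit 31) and halves the divider (clk >>= 1) for each doubling of the
 * frequency that the requested new_clock still allows. Requesting
 * new_clock == f_max shifts nine times, moving the bit-31 marker down to
 * bit 22, which the TMIO_MMC_CLK_ACTUAL test reads as a request for the
 * undivided (1/1) clock and turns the low byte into 0xff.
 */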
static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
        /* implicit BUG_ON(!res) */
        if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
                sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
                msleep(10);
        }

        sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
                sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
        msleep(10);
}

static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
        sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
                sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
        msleep(10);

        /* implicit BUG_ON(!res) */
        if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
                sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
                msleep(10);
        }
}

static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
        /* FIXME - should we set stop clock reg here */
        sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
        /* implicit BUG_ON(!res) */
        if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
                sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
        msleep(10);
        sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
        if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
                sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
        msleep(10);
}

static void tmio_mmc_reset_work(struct work_struct *work)
{
        struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
                                                  delayed_reset_work.work);
        struct mmc_request *mrq;
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        mrq = host->mrq;

        /*
         * Is the request already finished? Since we use a non-blocking
         * cancel_delayed_work(), it can happen that a .set_ios() call
         * preempts us, so we have to check for IS_ERR(host->mrq)
         */
        if (IS_ERR_OR_NULL(mrq)
            || time_is_after_jiffies(host->last_req_ts +
                msecs_to_jiffies(CMDREQ_TIMEOUT))) {
                spin_unlock_irqrestore(&host->lock, flags);
                return;
        }

        dev_warn(&host->pdev->dev,
                "timeout waiting for hardware interrupt (CMD%u)\n",
                mrq->cmd->opcode);

        if (host->data)
                host->data->error = -ETIMEDOUT;
        else if (host->cmd)
                host->cmd->error = -ETIMEDOUT;
        else
                mrq->cmd->error = -ETIMEDOUT;

        host->cmd = NULL;
        host->data = NULL;
        host->force_pio = false;

        spin_unlock_irqrestore(&host->lock, flags);

        tmio_mmc_reset(host);

        /* Ready for new calls */
        host->mrq = NULL;

        tmio_mmc_abort_dma(host);
        mmc_request_done(host->mmc, mrq);

        pm_runtime_mark_last_busy(mmc_dev(host->mmc));
        pm_runtime_put_autosuspend(mmc_dev(host->mmc));
}

/* called with host->lock held, interrupts disabled */
static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
        struct mmc_request *mrq;
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);

        mrq = host->mrq;
        if (IS_ERR_OR_NULL(mrq)) {
                spin_unlock_irqrestore(&host->lock, flags);
                return;
        }

        host->cmd = NULL;
        host->data = NULL;
        host->force_pio = false;

        cancel_delayed_work(&host->delayed_reset_work);

        host->mrq = NULL;
        spin_unlock_irqrestore(&host->lock, flags);

        if (mrq->cmd->error || (mrq->data && mrq->data->error))
                tmio_mmc_abort_dma(host);

        mmc_request_done(host->mmc, mrq);

        pm_runtime_mark_last_busy(mmc_dev(host->mmc));
        pm_runtime_put_autosuspend(mmc_dev(host->mmc));
}

static void tmio_mmc_done_work(struct work_struct *work)
{
        struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
                                                  done);
        tmio_mmc_finish_request(host);
}

/* These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme. */
#define APP_CMD         0x0040
#define RESP_NONE       0x0300
#define RESP_R1         0x0400
#define RESP_R1B        0x0500
#define RESP_R2         0x0600
#define RESP_R3         0x0700
#define DATA_PRESENT    0x0800
#define TRANSFER_READ   0x1000
#define TRANSFER_MULTI  0x2000
#define SECURITY_CMD    0x4000
#define NO_CMD12_ISSUE  0x4000 /* TMIO_MMC_HAVE_CMD12_CTRL */
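/*
 * Illustrative example (values derived from the defines above): a CMD17
 * single-block read with an R1 response would be encoded as
 *
 *      c = 17 | RESP_R1 | DATA_PRESENT | TRANSFER_READ
 *        = 0x11 | 0x0400 | 0x0800 | 0x1000 = 0x1c11
 *
 * before being written to CTL_SD_CMD by tmio_mmc_start_command() below.
 */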
static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
{
        struct mmc_data *data = host->data;
        int c = cmd->opcode;
        u32 irq_mask = TMIO_MASK_CMD;

        /* CMD12 is handled by hardware */
        if (cmd->opcode == MMC_STOP_TRANSMISSION && !cmd->arg) {
                sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
                return 0;
        }

        switch (mmc_resp_type(cmd)) {
        case MMC_RSP_NONE: c |= RESP_NONE; break;
        case MMC_RSP_R1:   c |= RESP_R1;   break;
        case MMC_RSP_R1B:  c |= RESP_R1B;  break;
        case MMC_RSP_R2:   c |= RESP_R2;   break;
        case MMC_RSP_R3:   c |= RESP_R3;   break;
        default:
                pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
                return -EINVAL;
        }

        host->cmd = cmd;

/* FIXME - this seems to be ok commented out but the spec suggests this bit
 * should be set when issuing app commands.
 *      if(cmd->flags & MMC_FLAG_ACMD)
 *              c |= APP_CMD;
 */
        if (data) {
                c |= DATA_PRESENT;
                if (data->blocks > 1) {
                        sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
                        c |= TRANSFER_MULTI;

                        /*
                         * Disable auto CMD12 at IO_RW_EXTENDED when
                         * multiple block transfer
                         */
                        if ((host->pdata->flags & TMIO_MMC_HAVE_CMD12_CTRL) &&
                            (cmd->opcode == SD_IO_RW_EXTENDED))
                                c |= NO_CMD12_ISSUE;
                }
                if (data->flags & MMC_DATA_READ)
                        c |= TRANSFER_READ;
        }

        if (!host->native_hotplug)
                irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
        tmio_mmc_enable_mmc_irqs(host, irq_mask);

        /* Fire off the command */
        sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
        sd_ctrl_write16(host, CTL_SD_CMD, c);

        return 0;
}

static void tmio_mmc_transfer_data(struct tmio_mmc_host *host,
                                   unsigned short *buf,
                                   unsigned int count)
{
        int is_read = host->data->flags & MMC_DATA_READ;
        u8 *buf8;

        /*
         * Transfer the data
         */
        if (is_read)
                sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
        else
                sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

        /* if count was an even number, we are done */
        if (!(count & 0x1))
                return;

        /* if count was an odd number, handle the trailing byte */
        buf8 = (u8 *)(buf + (count >> 1));

        /*
         * FIXME
         *
         * The driver and this function assume little-endian
         * byte order.
         */
        if (is_read)
                *buf8 = sd_ctrl_read16(host, CTL_SD_DATA_PORT) & 0xff;
        else
                sd_ctrl_write16(host, CTL_SD_DATA_PORT, *buf8);
}
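/*
 * Worked example for the odd-length path above (illustrative): a 5-byte
 * transfer performs two 16-bit accesses on CTL_SD_DATA_PORT for bytes
 * 0-3 (count >> 1 == 2), then moves the remaining byte 4 through the low
 * half of one extra 16-bit access.
 */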
/*
 * This chip always returns (at least?) as much data as you ask for.
 * I'm unsure what happens if you ask for less than a block. This should be
 * looked into to ensure that a funny length read doesn't hose the controller.
 */
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
        struct mmc_data *data = host->data;
        void *sg_virt;
        unsigned short *buf;
        unsigned int count;
        unsigned long flags;

        if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
                pr_err("PIO IRQ in DMA mode!\n");
                return;
        } else if (!data) {
                pr_debug("Spurious PIO IRQ\n");
                return;
        }

        sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
        buf = (unsigned short *)(sg_virt + host->sg_off);

        count = host->sg_ptr->length - host->sg_off;
        if (count > data->blksz)
                count = data->blksz;

        pr_debug("count: %08x offset: %08x flags %08x\n",
                 count, host->sg_off, data->flags);

        /* Transfer the data */
        tmio_mmc_transfer_data(host, buf, count);

        host->sg_off += count;

        tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);

        if (host->sg_off == host->sg_ptr->length)
                tmio_mmc_next_sg(host);

        return;
}

static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
{
        if (host->sg_ptr == &host->bounce_sg) {
                unsigned long flags;
                void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
                memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
                tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
        }
}

/* needs to be called with host->lock held */
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
        struct mmc_data *data = host->data;
        struct mmc_command *stop;

        host->data = NULL;

        if (!data) {
                dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
                return;
        }
        stop = data->stop;

        /* FIXME - return correct transfer count on errors */
        if (!data->error)
                data->bytes_xfered = data->blocks * data->blksz;
        else
                data->bytes_xfered = 0;

        pr_debug("Completed data request\n");

        /*
         * FIXME: other drivers allow an optional stop command of any given type
         * which we don't do, as the chip can auto generate them.
         * Perhaps we can be smarter about when to use auto CMD12 and
         * only issue the auto request when we know this is the desired
         * stop command, allowing fallback to the stop command the
         * upper layers expect. For now, we do what works.
         */

        if (data->flags & MMC_DATA_READ) {
                if (host->chan_rx && !host->force_pio)
                        tmio_mmc_check_bounce_buffer(host);
                dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
                        host->mrq);
        } else {
                dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
                        host->mrq);
        }

        if (stop) {
                if (stop->opcode == MMC_STOP_TRANSMISSION && !stop->arg)
                        sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
                else
                        BUG();
        }

        schedule_work(&host->done);
}
static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
{
        struct mmc_data *data;
        spin_lock(&host->lock);
        data = host->data;

        if (!data)
                goto out;

        if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
                u32 status = sd_ctrl_read32(host, CTL_STATUS);
                bool done = false;

                /*
                 * Has all data been written out yet? Testing on SuperH showed
                 * that in most cases the first interrupt already comes with
                 * the BUSY status bit clear, but on some operations, like
                 * mount or at the beginning of a write / sync / umount, there
                 * is one DATAEND interrupt with the BUSY bit set; in these
                 * cases waiting for one more interrupt fixes the problem.
                 */
                if (host->pdata->flags & TMIO_MMC_HAS_IDLE_WAIT) {
                        if (status & TMIO_STAT_ILL_FUNC)
                                done = true;
                } else {
                        if (!(status & TMIO_STAT_CMD_BUSY))
                                done = true;
                }

                if (done) {
                        tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
                        tasklet_schedule(&host->dma_complete);
                }
        } else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
                tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
                tasklet_schedule(&host->dma_complete);
        } else {
                tmio_mmc_do_data_irq(host);
                tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
        }
out:
        spin_unlock(&host->lock);
}

static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
                             unsigned int stat)
{
        struct mmc_command *cmd = host->cmd;
        int i, addr;

        spin_lock(&host->lock);

        if (!host->cmd) {
                pr_debug("Spurious CMD irq\n");
                goto out;
        }

        host->cmd = NULL;

        /* This controller is sicker than the PXA one. Not only do we need to
         * drop the top 8 bits of the first response word, we also need to
         * modify the order of the response for short response command types.
         */

        for (i = 3, addr = CTL_RESPONSE; i >= 0; i--, addr += 4)
                cmd->resp[i] = sd_ctrl_read32(host, addr);

        if (cmd->flags & MMC_RSP_136) {
                cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
                cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
                cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
                cmd->resp[3] <<= 8;
        } else if (cmd->flags & MMC_RSP_R3) {
                cmd->resp[0] = cmd->resp[3];
        }

        if (stat & TMIO_STAT_CMDTIMEOUT)
                cmd->error = -ETIMEDOUT;
        else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
                cmd->error = -EILSEQ;

        /* If there is data to handle we enable data IRQs here, and
         * we will ultimately finish the request in the data_end handler.
         * If there is no data or we encountered an error, finish now.
         */
        if (host->data && !cmd->error) {
                if (host->data->flags & MMC_DATA_READ) {
                        if (host->force_pio || !host->chan_rx)
                                tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
                        else
                                tasklet_schedule(&host->dma_issue);
                } else {
                        if (host->force_pio || !host->chan_tx)
                                tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
                        else
                                tasklet_schedule(&host->dma_issue);
                }
        } else {
                schedule_work(&host->done);
        }

out:
        spin_unlock(&host->lock);
}
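/*
 * Illustrative example for the 136-bit response reshuffle above: the four
 * 32-bit reads land most-significant word first in resp[0]. Shifting each
 * word left by 8 and pulling in the top byte of the next word drops the
 * controller's leading pad byte, so a raw resp[0] of 0x00aabbcc followed
 * by resp[1] = 0xddeeff.. becomes resp[0] = 0xaabbccdd, and so on down to
 * resp[3], whose low byte is left as zero.
 */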
static void tmio_mmc_card_irq_status(struct tmio_mmc_host *host,
                                     int *ireg, int *status)
{
        *status = sd_ctrl_read32(host, CTL_STATUS);
        *ireg = *status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask;

        pr_debug_status(*status);
        pr_debug_status(*ireg);

        /* Clear the status except the interrupt status */
        sd_ctrl_write32(host, CTL_STATUS, TMIO_MASK_IRQ);
}

static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host,
                                       int ireg, int status)
{
        struct mmc_host *mmc = host->mmc;

        /* Card insert / remove attempts */
        if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
                tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
                        TMIO_STAT_CARD_REMOVE);
                if ((((ireg & TMIO_STAT_CARD_REMOVE) && mmc->card) ||
                     ((ireg & TMIO_STAT_CARD_INSERT) && !mmc->card)) &&
                    !work_pending(&mmc->detect.work))
                        mmc_detect_change(host->mmc, msecs_to_jiffies(100));
                return true;
        }

        return false;
}

irqreturn_t tmio_mmc_card_detect_irq(int irq, void *devid)
{
        unsigned int ireg, status;
        struct tmio_mmc_host *host = devid;

        tmio_mmc_card_irq_status(host, &ireg, &status);
        __tmio_mmc_card_detect_irq(host, ireg, status);

        return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_card_detect_irq);

static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host,
                                  int ireg, int status)
{
        /* Command completion */
        if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
                tmio_mmc_ack_mmc_irqs(host,
                             TMIO_STAT_CMDRESPEND |
                             TMIO_STAT_CMDTIMEOUT);
                tmio_mmc_cmd_irq(host, status);
                return true;
        }

        /* Data transfer */
        if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
                tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
                tmio_mmc_pio_irq(host);
                return true;
        }

        /* Data transfer completion */
        if (ireg & TMIO_STAT_DATAEND) {
                tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
                tmio_mmc_data_irq(host);
                return true;
        }

        return false;
}

irqreturn_t tmio_mmc_sdcard_irq(int irq, void *devid)
{
        unsigned int ireg, status;
        struct tmio_mmc_host *host = devid;

        tmio_mmc_card_irq_status(host, &ireg, &status);
        __tmio_mmc_sdcard_irq(host, ireg, status);

        return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_sdcard_irq);

irqreturn_t tmio_mmc_sdio_irq(int irq, void *devid)
{
        struct tmio_mmc_host *host = devid;
        struct mmc_host *mmc = host->mmc;
        struct tmio_mmc_data *pdata = host->pdata;
        unsigned int ireg, status;
        unsigned int sdio_status;

        if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
                return IRQ_HANDLED;

        status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
        ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdcard_irq_mask;

        sdio_status = status & ~TMIO_SDIO_MASK_ALL;
        if (pdata->flags & TMIO_MMC_SDIO_STATUS_QUIRK)
                sdio_status |= 6;

        sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);

        if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
                mmc_signal_sdio_irq(mmc);

        return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_sdio_irq);

irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
        struct tmio_mmc_host *host = devid;
        unsigned int ireg, status;

        pr_debug("MMC IRQ begin\n");

        tmio_mmc_card_irq_status(host, &ireg, &status);
        if (__tmio_mmc_card_detect_irq(host, ireg, status))
                return IRQ_HANDLED;
        if (__tmio_mmc_sdcard_irq(host, ireg, status))
                return IRQ_HANDLED;

        tmio_mmc_sdio_irq(irq, devid);

        return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_irq);
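/*
 * Usage sketch (illustrative, not part of this file): a platform glue
 * driver typically wires the exported handlers above to its interrupt
 * line(s) during probe, roughly:
 *
 *      ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0,
 *                             dev_name(&pdev->dev), host);
 *
 * passing the tmio_mmc_host pointer as the devid that the handlers cast
 * back. Hosts with split interrupt lines can instead hook up
 * tmio_mmc_card_detect_irq, tmio_mmc_sdcard_irq and tmio_mmc_sdio_irq
 * individually.
 */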
static int tmio_mmc_start_data(struct tmio_mmc_host *host,
                               struct mmc_data *data)
{
        struct tmio_mmc_data *pdata = host->pdata;

        pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
                 data->blksz, data->blocks);

        /* Some hardware cannot perform 2 byte requests in 4 bit mode */
        if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
                int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;

                if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
                        pr_err("%s: %d byte block unsupported in 4 bit mode\n",
                               mmc_hostname(host->mmc), data->blksz);
                        return -EINVAL;
                }
        }

        tmio_mmc_init_sg(host, data);
        host->data = data;

        /* Set transfer length / blocksize */
        sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
        sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

        tmio_mmc_start_dma(host, data);

        return 0;
}

/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct tmio_mmc_host *host = mmc_priv(mmc);
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&host->lock, flags);

        if (host->mrq) {
                pr_debug("request not null\n");
                if (IS_ERR(host->mrq)) {
                        spin_unlock_irqrestore(&host->lock, flags);
                        mrq->cmd->error = -EAGAIN;
                        mmc_request_done(mmc, mrq);
                        return;
                }
        }

        host->last_req_ts = jiffies;
        wmb();
        host->mrq = mrq;

        spin_unlock_irqrestore(&host->lock, flags);

        pm_runtime_get_sync(mmc_dev(mmc));

        if (mrq->data) {
                ret = tmio_mmc_start_data(host, mrq->data);
                if (ret)
                        goto fail;
        }

        ret = tmio_mmc_start_command(host, mrq->cmd);
        if (!ret) {
                schedule_delayed_work(&host->delayed_reset_work,
                                      msecs_to_jiffies(CMDREQ_TIMEOUT));
                return;
        }

fail:
        host->force_pio = false;
        host->mrq = NULL;
        mrq->cmd->error = ret;
        mmc_request_done(mmc, mrq);

        pm_runtime_mark_last_busy(mmc_dev(mmc));
        pm_runtime_put_autosuspend(mmc_dev(mmc));
}
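/*
 * Note on host->mrq (summarizing behaviour visible in this file): it acts
 * as a three-way state flag. NULL means the host is idle, a valid pointer
 * means a request is in flight, and an ERR_PTR() value (set by
 * tmio_mmc_set_ios() below) marks the host as temporarily claimed, which
 * is why both tmio_mmc_request() above and the timeout worker check
 * IS_ERR() before touching it.
 */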
static int tmio_mmc_clk_update(struct tmio_mmc_host *host)
{
        struct mmc_host *mmc = host->mmc;
        int ret;

        if (!host->clk_enable)
                return -ENOTSUPP;

        ret = host->clk_enable(host->pdev, &mmc->f_max);
        if (!ret)
                mmc->f_min = mmc->f_max / 512;

        return ret;
}

static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
{
        struct mmc_host *mmc = host->mmc;
        int ret = 0;

        /* .set_ios() returns void, so there is no chance to report an error */

        if (host->set_pwr)
                host->set_pwr(host->pdev, 1);

        if (!IS_ERR(mmc->supply.vmmc)) {
                ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
                /*
                 * Attention: empirical value. With a b43 WiFi SDIO card this
                 * delay proved necessary for reliable card-insertion probing.
                 * 100us were not enough. Is this the same 140us delay as in
                 * tmio_mmc_set_ios()?
                 */
                udelay(200);
        }
        /*
         * It seems VccQ should be switched on after Vcc; this is also what
         * the omap_hsmmc.c driver does.
         */
        if (!IS_ERR(mmc->supply.vqmmc) && !ret) {
                ret = regulator_enable(mmc->supply.vqmmc);
                udelay(200);
        }

        if (ret < 0)
                dev_dbg(&host->pdev->dev, "Regulators failed to power up: %d\n",
                        ret);
}

static void tmio_mmc_power_off(struct tmio_mmc_host *host)
{
        struct mmc_host *mmc = host->mmc;

        if (!IS_ERR(mmc->supply.vqmmc))
                regulator_disable(mmc->supply.vqmmc);

        if (!IS_ERR(mmc->supply.vmmc))
                mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

        if (host->set_pwr)
                host->set_pwr(host->pdev, 0);
}

static void tmio_mmc_set_bus_width(struct tmio_mmc_host *host,
                                   unsigned char bus_width)
{
        switch (bus_width) {
        case MMC_BUS_WIDTH_1:
                sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
                break;
        case MMC_BUS_WIDTH_4:
                sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
                break;
        }
}
/* Set MMC clock / power.
 * Note: This controller uses a simple divider scheme, therefore it cannot
 * run a MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
 * MMC won't run that fast, it has to be clocked at 12MHz, which is the next
 * slowest setting.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct tmio_mmc_host *host = mmc_priv(mmc);
        struct device *dev = &host->pdev->dev;
        unsigned long flags;

        pm_runtime_get_sync(mmc_dev(mmc));

        mutex_lock(&host->ios_lock);

        spin_lock_irqsave(&host->lock, flags);
        if (host->mrq) {
                if (IS_ERR(host->mrq)) {
                        dev_dbg(dev,
                                "%s.%d: concurrent .set_ios(), clk %u, mode %u\n",
                                current->comm, task_pid_nr(current),
                                ios->clock, ios->power_mode);
                        host->mrq = ERR_PTR(-EINTR);
                } else {
                        dev_dbg(dev,
                                "%s.%d: CMD%u active since %lu, now %lu!\n",
                                current->comm, task_pid_nr(current),
                                host->mrq->cmd->opcode, host->last_req_ts, jiffies);
                }
                spin_unlock_irqrestore(&host->lock, flags);

                mutex_unlock(&host->ios_lock);
                return;
        }

        host->mrq = ERR_PTR(-EBUSY);

        spin_unlock_irqrestore(&host->lock, flags);

        switch (ios->power_mode) {
        case MMC_POWER_OFF:
                tmio_mmc_power_off(host);
                tmio_mmc_clk_stop(host);
                break;
        case MMC_POWER_UP:
                tmio_mmc_set_clock(host, ios->clock);
                tmio_mmc_power_on(host, ios->vdd);
                tmio_mmc_clk_start(host);
                tmio_mmc_set_bus_width(host, ios->bus_width);
                break;
        case MMC_POWER_ON:
                tmio_mmc_set_clock(host, ios->clock);
                tmio_mmc_clk_start(host);
                tmio_mmc_set_bus_width(host, ios->bus_width);
                break;
        }

        /* Let things settle; delay taken from the WinCE driver */
        udelay(140);
        if (PTR_ERR(host->mrq) == -EINTR)
                dev_dbg(&host->pdev->dev,
                        "%s.%d: IOS interrupted: clk %u, mode %u",
                        current->comm, task_pid_nr(current),
                        ios->clock, ios->power_mode);
        host->mrq = NULL;

        host->clk_cache = ios->clock;

        mutex_unlock(&host->ios_lock);

        pm_runtime_mark_last_busy(mmc_dev(mmc));
        pm_runtime_put_autosuspend(mmc_dev(mmc));
}

static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
        struct tmio_mmc_host *host = mmc_priv(mmc);
        struct tmio_mmc_data *pdata = host->pdata;
        int ret = mmc_gpio_get_ro(mmc);
        if (ret >= 0)
                return ret;

        pm_runtime_get_sync(mmc_dev(mmc));
        ret = !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
                (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
        pm_runtime_mark_last_busy(mmc_dev(mmc));
        pm_runtime_put_autosuspend(mmc_dev(mmc));

        return ret;
}

static int tmio_multi_io_quirk(struct mmc_card *card,
                               unsigned int direction, int blk_size)
{
        struct tmio_mmc_host *host = mmc_priv(card->host);

        if (host->multi_io_quirk)
                return host->multi_io_quirk(card, direction, blk_size);

        return blk_size;
}

static const struct mmc_host_ops tmio_mmc_ops = {
        .request        = tmio_mmc_request,
        .set_ios        = tmio_mmc_set_ios,
        .get_ro         = tmio_mmc_get_ro,
        .get_cd         = mmc_gpio_get_cd,
        .enable_sdio_irq = tmio_mmc_enable_sdio_irq,
        .multi_io_quirk = tmio_multi_io_quirk,
};
static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
{
        struct tmio_mmc_data *pdata = host->pdata;
        struct mmc_host *mmc = host->mmc;

        mmc_regulator_get_supply(mmc);

        /* use ocr_mask if no regulator */
        if (!mmc->ocr_avail)
                mmc->ocr_avail = pdata->ocr_mask;

        /*
         * Try again: there is a possibility that the regulator has not
         * been probed yet.
         */
        if (!mmc->ocr_avail)
                return -EPROBE_DEFER;

        return 0;
}

static void tmio_mmc_of_parse(struct platform_device *pdev,
                              struct tmio_mmc_data *pdata)
{
        const struct device_node *np = pdev->dev.of_node;
        if (!np)
                return;

        if (of_get_property(np, "toshiba,mmc-wrprotect-disable", NULL))
                pdata->flags |= TMIO_MMC_WRPROTECT_DISABLE;
}

struct tmio_mmc_host*
tmio_mmc_host_alloc(struct platform_device *pdev)
{
        struct tmio_mmc_host *host;
        struct mmc_host *mmc;

        mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
        if (!mmc)
                return NULL;

        host = mmc_priv(mmc);
        host->mmc = mmc;
        host->pdev = pdev;

        return host;
}
EXPORT_SYMBOL(tmio_mmc_host_alloc);

void tmio_mmc_host_free(struct tmio_mmc_host *host)
{
        mmc_free_host(host->mmc);
}
EXPORT_SYMBOL(tmio_mmc_host_free);
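/*
 * Usage sketch (illustrative, simplified from how glue drivers use these
 * exports; my_clk_enable is a hypothetical platform hook): a platform
 * driver's probe pairs tmio_mmc_host_alloc() with tmio_mmc_host_probe()
 * below and frees the host on failure, roughly:
 *
 *      host = tmio_mmc_host_alloc(pdev);
 *      if (!host)
 *              return -ENOMEM;
 *      host->clk_enable = my_clk_enable;
 *      ret = tmio_mmc_host_probe(host, pdata);
 *      if (ret < 0)
 *              tmio_mmc_host_free(host);
 *      return ret;
 */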
int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
                        struct tmio_mmc_data *pdata)
{
        struct platform_device *pdev = _host->pdev;
        struct mmc_host *mmc = _host->mmc;
        struct resource *res_ctl;
        int ret;
        u32 irq_mask = TMIO_MASK_CMD;

        tmio_mmc_of_parse(pdev, pdata);

        if (!(pdata->flags & TMIO_MMC_HAS_IDLE_WAIT))
                _host->write16_hook = NULL;

        res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res_ctl)
                return -EINVAL;

        ret = mmc_of_parse(mmc);
        if (ret < 0)
                goto host_free;

        _host->pdata = pdata;
        platform_set_drvdata(pdev, mmc);

        _host->set_pwr = pdata->set_pwr;
        _host->set_clk_div = pdata->set_clk_div;

        ret = tmio_mmc_init_ocr(_host);
        if (ret < 0)
                goto host_free;

        _host->ctl = devm_ioremap(&pdev->dev,
                                  res_ctl->start, resource_size(res_ctl));
        if (!_host->ctl) {
                ret = -ENOMEM;
                goto host_free;
        }

        mmc->ops = &tmio_mmc_ops;
        mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
        mmc->caps2 |= pdata->capabilities2;
        mmc->max_segs = 32;
        mmc->max_blk_size = 512;
        mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
                mmc->max_segs;
        mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
        mmc->max_seg_size = mmc->max_req_size;

        _host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD ||
                                  mmc->caps & MMC_CAP_NEEDS_POLL ||
                                  mmc->caps & MMC_CAP_NONREMOVABLE ||
                                  mmc->slot.cd_irq >= 0);

        if (tmio_mmc_clk_update(_host) < 0) {
                mmc->f_max = pdata->hclk;
                mmc->f_min = mmc->f_max / 512;
        }

        /*
         * Check the sanity of mmc->f_min to prevent tmio_mmc_set_clock() from
         * looping forever...
         */
        if (mmc->f_min == 0) {
                ret = -EINVAL;
                goto host_free;
        }

        /*
         * While using internal tmio hardware logic for card detection, we need
         * to ensure it stays powered for it to work.
         */
        if (_host->native_hotplug)
                pm_runtime_get_noresume(&pdev->dev);

        tmio_mmc_clk_stop(_host);
        tmio_mmc_reset(_host);

        _host->sdcard_irq_mask = sd_ctrl_read32(_host, CTL_IRQ_MASK);
        tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);

        /* Unmask the IRQs we want to know about */
        if (!_host->chan_rx)
                irq_mask |= TMIO_MASK_READOP;
        if (!_host->chan_tx)
                irq_mask |= TMIO_MASK_WRITEOP;
        if (!_host->native_hotplug)
                irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);

        _host->sdcard_irq_mask &= ~irq_mask;

        _host->sdio_irq_enabled = false;
        if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
                _host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
                sd_ctrl_write16(_host, CTL_SDIO_IRQ_MASK, _host->sdio_irq_mask);
                sd_ctrl_write16(_host, CTL_TRANSACTION_CTL, 0x0000);
        }

        spin_lock_init(&_host->lock);
        mutex_init(&_host->ios_lock);

        /* Init delayed work for request timeouts */
        INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);
        INIT_WORK(&_host->done, tmio_mmc_done_work);

        /* See if we also get DMA */
        tmio_mmc_request_dma(_host, pdata);

        pm_runtime_set_active(&pdev->dev);
        pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
        pm_runtime_use_autosuspend(&pdev->dev);
        pm_runtime_enable(&pdev->dev);

        ret = mmc_add_host(mmc);
        if (ret < 0) {
                tmio_mmc_host_remove(_host);
                return ret;
        }

        dev_pm_qos_expose_latency_limit(&pdev->dev, 100);

        if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
                ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0);
                if (ret < 0) {
                        tmio_mmc_host_remove(_host);
                        return ret;
                }
                mmc_gpiod_request_cd_irq(mmc);
        }

        return 0;

host_free:

        return ret;
}
EXPORT_SYMBOL(tmio_mmc_host_probe);

void tmio_mmc_host_remove(struct tmio_mmc_host *host)
{
        struct platform_device *pdev = host->pdev;
        struct mmc_host *mmc = host->mmc;

        if (!host->native_hotplug)
                pm_runtime_get_sync(&pdev->dev);

        dev_pm_qos_hide_latency_limit(&pdev->dev);

        mmc_remove_host(mmc);
        cancel_work_sync(&host->done);
        cancel_delayed_work_sync(&host->delayed_reset_work);
        tmio_mmc_release_dma(host);

        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
}
EXPORT_SYMBOL(tmio_mmc_host_remove);

#ifdef CONFIG_PM
int tmio_mmc_host_runtime_suspend(struct device *dev)
{
        struct mmc_host *mmc = dev_get_drvdata(dev);
        struct tmio_mmc_host *host = mmc_priv(mmc);

        tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);

        if (host->clk_cache)
                tmio_mmc_clk_stop(host);

        if (host->clk_disable)
                host->clk_disable(host->pdev);

        return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_suspend);

int tmio_mmc_host_runtime_resume(struct device *dev)
{
        struct mmc_host *mmc = dev_get_drvdata(dev);
        struct tmio_mmc_host *host = mmc_priv(mmc);

        tmio_mmc_reset(host);
        tmio_mmc_clk_update(host);

        if (host->clk_cache) {
                tmio_mmc_set_clock(host, host->clk_cache);
                tmio_mmc_clk_start(host);
        }

        tmio_mmc_enable_dma(host, true);

        return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_resume);
#endif

MODULE_LICENSE("GPL v2");
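/*
 * Usage sketch (illustrative, not part of this file): glue drivers hook
 * the runtime PM helpers above into their driver's dev_pm_ops, roughly:
 *
 *      static const struct dev_pm_ops tmio_mmc_dev_pm_ops = {
 *              SET_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend,
 *                                 tmio_mmc_host_runtime_resume, NULL)
 *      };
 *
 * and point their platform_driver's .driver.pm at that structure.
 */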