1/*
2 * linux/drivers/mmc/core/core.c
3 *
4 * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
5 * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
6 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
7 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/interrupt.h>
16#include <linux/completion.h>
17#include <linux/device.h>
18#include <linux/delay.h>
19#include <linux/pagemap.h>
20#include <linux/err.h>
21#include <linux/leds.h>
22#include <linux/scatterlist.h>
23#include <linux/log2.h>
24#include <linux/regulator/consumer.h>
25#include <linux/pm_runtime.h>
26#include <linux/pm_wakeup.h>
27#include <linux/suspend.h>
28#include <linux/fault-inject.h>
29#include <linux/random.h>
30#include <linux/slab.h>
31#include <linux/of.h>
32
33#include <linux/mmc/card.h>
34#include <linux/mmc/host.h>
35#include <linux/mmc/mmc.h>
36#include <linux/mmc/sd.h>
37#include <linux/mmc/slot-gpio.h>
38
39#include "core.h"
40#include "bus.h"
41#include "host.h"
42#include "sdio_bus.h"
43#include "pwrseq.h"
44
45#include "mmc_ops.h"
46#include "sd_ops.h"
47#include "sdio_ops.h"
48
49/* If the device is not responding */
50#define MMC_CORE_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
51
52/*
53 * Background operations can take a long time, depending on the housekeeping
54 * operations the card has to perform.
55 */
56#define MMC_BKOPS_MAX_TIMEOUT (4 * 60 * 1000) /* max time to wait in ms */
57
58static struct workqueue_struct *workqueue;
59static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
60
61/*
62 * Enabling software CRCs on the data blocks can be a significant (30%)
63 * performance cost, and for other reasons may not always be desired.
64 * So we allow it to be disabled.
65 */
66bool use_spi_crc = 1;
67module_param(use_spi_crc, bool, 0);
68
69/*
70 * Internal function. Schedule delayed work in the MMC work queue.
71 */
72static int mmc_schedule_delayed_work(struct delayed_work *work,
73 unsigned long delay)
74{
75 return queue_delayed_work(workqueue, work, delay);
76}
77
78/*
79 * Internal function. Flush all scheduled work from the MMC work queue.
80 */
81static void mmc_flush_scheduled_work(void)
82{
83 flush_workqueue(workqueue);
84}
85
86#ifdef CONFIG_FAIL_MMC_REQUEST
87
88/*
89 * Internal function. Inject random data errors.
90 * If mmc_data is NULL no errors are injected.
91 */
92static void mmc_should_fail_request(struct mmc_host *host,
93 struct mmc_request *mrq)
94{
95 struct mmc_command *cmd = mrq->cmd;
96 struct mmc_data *data = mrq->data;
97 static const int data_errors[] = {
98 -ETIMEDOUT,
99 -EILSEQ,
100 -EIO,
101 };
102
103 if (!data)
104 return;
105
106 if (cmd->error || data->error ||
107 !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
108 return;
109
110 data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
111 data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
112}
113
114#else /* CONFIG_FAIL_MMC_REQUEST */
115
116static inline void mmc_should_fail_request(struct mmc_host *host,
117 struct mmc_request *mrq)
118{
119}
120
121#endif /* CONFIG_FAIL_MMC_REQUEST */
122
123/**
124 * mmc_request_done - finish processing an MMC request
125 * @host: MMC host which completed request
126 * @mrq: MMC request which completed
127 *
128 * MMC drivers should call this function when they have completed
129 * their processing of a request.
130 */
131void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
132{
133 struct mmc_command *cmd = mrq->cmd;
134 int err = cmd->error;
135
136 /* Flag re-tuning needed on CRC errors */
137 if (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
138 (mrq->data && mrq->data->error == -EILSEQ) ||
139 (mrq->stop && mrq->stop->error == -EILSEQ))
140 mmc_retune_needed(host);
141
142 if (err && cmd->retries && mmc_host_is_spi(host)) {
143 if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
144 cmd->retries = 0;
145 }
146
147 if (err && cmd->retries && !mmc_card_removed(host->card)) {
148 /*
149 * Request starter must handle retries - see
150 * mmc_wait_for_req_done().
151 */
152 if (mrq->done)
153 mrq->done(mrq);
154 } else {
155 mmc_should_fail_request(host, mrq);
156
157 led_trigger_event(host->led, LED_OFF);
158
159 if (mrq->sbc) {
160 pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
161 mmc_hostname(host), mrq->sbc->opcode,
162 mrq->sbc->error,
163 mrq->sbc->resp[0], mrq->sbc->resp[1],
164 mrq->sbc->resp[2], mrq->sbc->resp[3]);
165 }
166
167 pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
168 mmc_hostname(host), cmd->opcode, err,
169 cmd->resp[0], cmd->resp[1],
170 cmd->resp[2], cmd->resp[3]);
171
172 if (mrq->data) {
173 pr_debug("%s: %d bytes transferred: %d\n",
174 mmc_hostname(host),
175 mrq->data->bytes_xfered, mrq->data->error);
176 }
177
178 if (mrq->stop) {
179 pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
180 mmc_hostname(host), mrq->stop->opcode,
181 mrq->stop->error,
182 mrq->stop->resp[0], mrq->stop->resp[1],
183 mrq->stop->resp[2], mrq->stop->resp[3]);
184 }
185
186 if (mrq->done)
187 mrq->done(mrq);
188
189 mmc_host_clk_release(host);
190 }
191}
192
193EXPORT_SYMBOL(mmc_request_done);
194
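/*
 * Illustrative sketch (editorial example, not part of this file): a host
 * controller driver would typically call mmc_request_done() from its
 * interrupt handler once the hardware has finished with the request.
 * "struct example_host", example_read_cmd_error() and example_bytes_done()
 * below are hypothetical driver helpers.
 */
#if 0
static irqreturn_t example_host_irq(int irq, void *dev_id)
{
	struct example_host *eh = dev_id;		/* hypothetical driver state */
	struct mmc_request *mrq = eh->mrq;

	eh->mrq = NULL;
	mrq->cmd->error = example_read_cmd_error(eh);	/* 0 or e.g. -EILSEQ */
	if (mrq->data)
		mrq->data->bytes_xfered = example_bytes_done(eh);

	/* Hand the request back to the core, which handles retries and logging */
	mmc_request_done(eh->mmc, mrq);
	return IRQ_HANDLED;
}
#endif
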
195static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
196{
197 int err;
198
199 /* Assumes host controller has been runtime resumed by mmc_claim_host */
200 err = mmc_retune(host);
201 if (err) {
202 mrq->cmd->error = err;
203 mmc_request_done(host, mrq);
204 return;
205 }
206
207 host->ops->request(host, mrq);
208}
209
210static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
211{
212#ifdef CONFIG_MMC_DEBUG
213 unsigned int i, sz;
214 struct scatterlist *sg;
215#endif
216 mmc_retune_hold(host);
217
218 if (mmc_card_removed(host->card))
219 return -ENOMEDIUM;
220
221 if (mrq->sbc) {
222 pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
223 mmc_hostname(host), mrq->sbc->opcode,
224 mrq->sbc->arg, mrq->sbc->flags);
225 }
226
227 pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
228 mmc_hostname(host), mrq->cmd->opcode,
229 mrq->cmd->arg, mrq->cmd->flags);
230
231 if (mrq->data) {
232 pr_debug("%s: blksz %d blocks %d flags %08x "
233 "tsac %d ms nsac %d\n",
234 mmc_hostname(host), mrq->data->blksz,
235 mrq->data->blocks, mrq->data->flags,
236 mrq->data->timeout_ns / 1000000,
237 mrq->data->timeout_clks);
238 }
239
240 if (mrq->stop) {
241 pr_debug("%s: CMD%u arg %08x flags %08x\n",
242 mmc_hostname(host), mrq->stop->opcode,
243 mrq->stop->arg, mrq->stop->flags);
244 }
245
246 WARN_ON(!host->claimed);
247
248 mrq->cmd->error = 0;
249 mrq->cmd->mrq = mrq;
250 if (mrq->sbc) {
251 mrq->sbc->error = 0;
252 mrq->sbc->mrq = mrq;
253 }
254 if (mrq->data) {
255 BUG_ON(mrq->data->blksz > host->max_blk_size);
256 BUG_ON(mrq->data->blocks > host->max_blk_count);
257 BUG_ON(mrq->data->blocks * mrq->data->blksz >
258 host->max_req_size);
259
260#ifdef CONFIG_MMC_DEBUG
261 sz = 0;
262 for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
263 sz += sg->length;
264 BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
265#endif
266
267 mrq->cmd->data = mrq->data;
268 mrq->data->error = 0;
269 mrq->data->mrq = mrq;
270 if (mrq->stop) {
271 mrq->data->stop = mrq->stop;
272 mrq->stop->error = 0;
273 mrq->stop->mrq = mrq;
274 }
275 }
276 mmc_host_clk_hold(host);
277 led_trigger_event(host->led, LED_FULL);
278 __mmc_start_request(host, mrq);
279
280 return 0;
281}
282
283/**
284 * mmc_start_bkops - start BKOPS for supported cards
285 * @card: MMC card to start BKOPS
286 * @from_exception: A flag to indicate if this function was
287 * called due to an exception raised by the card
288 *
289 * Start background operations whenever requested.
290 * When the urgent BKOPS bit is set in a R1 command response
291 * then background operations should be started immediately.
292*/
293void mmc_start_bkops(struct mmc_card *card, bool from_exception)
294{
295 int err;
296 int timeout;
297 bool use_busy_signal;
298
299 BUG_ON(!card);
300
301 if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
302 return;
303
304 err = mmc_read_bkops_status(card);
305 if (err) {
306 pr_err("%s: Failed to read bkops status: %d\n",
307 mmc_hostname(card->host), err);
308 return;
309 }
310
311 if (!card->ext_csd.raw_bkops_status)
312 return;
313
314 if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
315 from_exception)
316 return;
317
318 mmc_claim_host(card->host);
319 if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
320 timeout = MMC_BKOPS_MAX_TIMEOUT;
321 use_busy_signal = true;
322 } else {
323 timeout = 0;
324 use_busy_signal = false;
325 }
326
327 mmc_retune_hold(card->host);
328
329 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
330 EXT_CSD_BKOPS_START, 1, timeout,
331 use_busy_signal, true, false);
332 if (err) {
333 pr_warn("%s: Error %d starting bkops\n",
334 mmc_hostname(card->host), err);
335 mmc_retune_release(card->host);
336 goto out;
337 }
338
339 /*
340 * For urgent BKOPS status (LEVEL_2 and higher), BKOPS is executed
341 * synchronously; otherwise the operation is left running in the
342 * background and the card is marked as doing BKOPS
343 */
344 if (!use_busy_signal)
345 mmc_card_set_doing_bkops(card);
346 else
347 mmc_retune_release(card->host);
348out:
349 mmc_release_host(card->host);
350}
351EXPORT_SYMBOL(mmc_start_bkops);
352
353/*
354 * mmc_wait_data_done() - done callback for data request
355 * @mrq: done data request
356 *
357 * Wakes up the mmc context; passed as the done callback to the host controller driver
358 */
359static void mmc_wait_data_done(struct mmc_request *mrq)
360{
361 struct mmc_context_info *context_info = &mrq->host->context_info;
362
363 context_info->is_done_rcv = true;
364 wake_up_interruptible(&context_info->wait);
365}
366
367static void mmc_wait_done(struct mmc_request *mrq)
368{
369 complete(&mrq->completion);
370}
371
372/*
373 * __mmc_start_data_req() - start a data request
374 * @host: MMC host to start the request
375 * @mrq: data request to start
376 *
377 * Sets the done callback to be called when the request is completed by the
378 * card, then starts execution of the data request.
379 */
380static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
381{
382 int err;
383
384 mrq->done = mmc_wait_data_done;
385 mrq->host = host;
386
387 err = mmc_start_request(host, mrq);
388 if (err) {
389 mrq->cmd->error = err;
390 mmc_wait_data_done(mrq);
391 }
392
393 return err;
394}
395
396static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
397{
398 int err;
399
400 init_completion(&mrq->completion);
401 mrq->done = mmc_wait_done;
402
403 err = mmc_start_request(host, mrq);
404 if (err) {
405 mrq->cmd->error = err;
406 complete(&mrq->completion);
407 }
408
409 return err;
410}
411
412/*
413 * mmc_wait_for_data_req_done() - wait for a request to complete
414 * @host: MMC host to prepare the command.
415 * @mrq: MMC request to wait for
416 *
417 * Blocks the MMC context until the host controller acks the end of the data
418 * request execution, or a new request notification arrives from the block layer.
419 * Handles command retries.
420 *
421 * Returns enum mmc_blk_status after checking errors.
422 */
423static int mmc_wait_for_data_req_done(struct mmc_host *host,
424 struct mmc_request *mrq,
425 struct mmc_async_req *next_req)
426{
427 struct mmc_command *cmd;
428 struct mmc_context_info *context_info = &host->context_info;
429 int err;
430 unsigned long flags;
431
432 while (1) {
433 wait_event_interruptible(context_info->wait,
434 (context_info->is_done_rcv ||
435 context_info->is_new_req));
436 spin_lock_irqsave(&context_info->lock, flags);
437 context_info->is_waiting_last_req = false;
438 spin_unlock_irqrestore(&context_info->lock, flags);
439 if (context_info->is_done_rcv) {
440 context_info->is_done_rcv = false;
441 context_info->is_new_req = false;
442 cmd = mrq->cmd;
443
444 if (!cmd->error || !cmd->retries ||
445 mmc_card_removed(host->card)) {
446 err = host->areq->err_check(host->card,
447 host->areq);
448 break; /* return err */
449 } else {
450 mmc_retune_recheck(host);
451 pr_info("%s: req failed (CMD%u): %d, retrying...\n",
452 mmc_hostname(host),
453 cmd->opcode, cmd->error);
454 cmd->retries--;
455 cmd->error = 0;
456 __mmc_start_request(host, mrq);
457 continue; /* wait for done/new event again */
458 }
459 } else if (context_info->is_new_req) {
460 context_info->is_new_req = false;
461 if (!next_req)
462 return MMC_BLK_NEW_REQUEST;
463 }
464 }
465 mmc_retune_release(host);
466 return err;
467}
468
469static void mmc_wait_for_req_done(struct mmc_host *host,
470 struct mmc_request *mrq)
471{
472 struct mmc_command *cmd;
473
474 while (1) {
475 wait_for_completion(&mrq->completion);
476
477 cmd = mrq->cmd;
478
479 /*
480 * If the host has timed out waiting for the sanitize
481 * operation to complete, the card might still be in the
482 * programming state, so let's try to bring it out of
483 * that state.
484 */
485 if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
486 if (!mmc_interrupt_hpi(host->card)) {
487 pr_warn("%s: %s: Interrupted sanitize\n",
488 mmc_hostname(host), __func__);
489 cmd->error = 0;
490 break;
491 } else {
492 pr_err("%s: %s: Failed to interrupt sanitize\n",
493 mmc_hostname(host), __func__);
494 }
495 }
496 if (!cmd->error || !cmd->retries ||
497 mmc_card_removed(host->card))
498 break;
499
500 mmc_retune_recheck(host);
501
502 pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
503 mmc_hostname(host), cmd->opcode, cmd->error);
504 cmd->retries--;
505 cmd->error = 0;
506 __mmc_start_request(host, mrq);
507 }
508
509 mmc_retune_release(host);
510}
511
512/**
513 * mmc_pre_req - Prepare for a new request
514 * @host: MMC host to prepare command
515 * @mrq: MMC request to prepare for
516 * @is_first_req: true if there is no previously started request
517 * that may run in parallel to this call, otherwise false
518 *
519 * mmc_pre_req() is called prior to mmc_start_req() to let the
520 * host prepare for the new request. Preparation of a request may be
521 * performed while another request is running on the host.
522 */
523static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
524 bool is_first_req)
525{
526 if (host->ops->pre_req) {
527 mmc_host_clk_hold(host);
528 host->ops->pre_req(host, mrq, is_first_req);
529 mmc_host_clk_release(host);
530 }
531}
532
533/**
534 * mmc_post_req - Post process a completed request
535 * @host: MMC host to post process command
536 * @mrq: MMC request to post process for
537 * @err: Error, if non zero, clean up any resources made in pre_req
538 *
539 * Let the host post process a completed request. Post processing of
540 * a request may be performed while another request is running.
541 */
542static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
543 int err)
544{
545 if (host->ops->post_req) {
546 mmc_host_clk_hold(host);
547 host->ops->post_req(host, mrq, err);
548 mmc_host_clk_release(host);
549 }
550}
551
552/**
553 * mmc_start_req - start a non-blocking request
554 * @host: MMC host to start command
555 * @areq: async request to start
556 * @error: out parameter returns 0 for success, otherwise non zero
557 *
558 * Start a new MMC custom command request for a host.
559 * If there is an ongoing async request, wait for it to complete,
560 * then start the new one and return.
561 * Does not wait for the new request to complete.
562 *
563 * Returns the completed request, or NULL if none has completed.
564 * Wait for an ongoing request (previously started) to complete and
565 * return the completed request. If there is no ongoing request, NULL
566 * is returned without waiting. NULL is not an error condition.
567 */
568struct mmc_async_req *mmc_start_req(struct mmc_host *host,
569 struct mmc_async_req *areq, int *error)
570{
571 int err = 0;
572 int start_err = 0;
573 struct mmc_async_req *data = host->areq;
574
575 /* Prepare a new request */
576 if (areq)
577 mmc_pre_req(host, areq->mrq, !host->areq);
578
579 if (host->areq) {
580 err = mmc_wait_for_data_req_done(host, host->areq->mrq, areq);
581 if (err == MMC_BLK_NEW_REQUEST) {
582 if (error)
583 *error = err;
584 /*
585 * The previous request was not completed,
586 * nothing to return
587 */
588 return NULL;
589 }
590 /*
591 * Check BKOPS urgency for each R1 response
592 */
593 if (host->card && mmc_card_mmc(host->card) &&
594 ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
595 (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
596 (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) {
597
598 /* Cancel the prepared request */
599 if (areq)
600 mmc_post_req(host, areq->mrq, -EINVAL);
601
602 mmc_start_bkops(host->card, true);
603
604 /* prepare the request again */
605 if (areq)
606 mmc_pre_req(host, areq->mrq, !host->areq);
607 }
608 }
609
610 if (!err && areq)
611 start_err = __mmc_start_data_req(host, areq->mrq);
612
613 if (host->areq)
614 mmc_post_req(host, host->areq->mrq, 0);
615
616 /* Cancel a prepared request if it was not started. */
617 if ((err || start_err) && areq)
618 mmc_post_req(host, areq->mrq, -EINVAL);
619
620 if (err)
621 host->areq = NULL;
622 else
623 host->areq = areq;
624
625 if (error)
626 *error = err;
627 return data;
628}
629EXPORT_SYMBOL(mmc_start_req);
630
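/*
 * Illustrative sketch (editorial example, not part of this file): how a
 * block driver might pipeline requests through mmc_start_req(), letting the
 * host prepare the next request while the previous one is still in flight.
 * "struct example_queue", example_prep_areq() and example_complete_areq()
 * are hypothetical helpers.
 */
#if 0
static void example_issue_rw(struct mmc_host *host, struct example_queue *eq)
{
	struct mmc_async_req *new_areq = example_prep_areq(eq);	/* hypothetical */
	struct mmc_async_req *done_areq;
	int error;

	/* Start new_areq; returns the previously started request, if any */
	done_areq = mmc_start_req(host, new_areq, &error);
	if (done_areq)
		example_complete_areq(eq, done_areq, error);		/* hypothetical */
}
#endif
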
631/**
632 * mmc_wait_for_req - start a request and wait for completion
633 * @host: MMC host to start command
634 * @mrq: MMC request to start
635 *
636 * Start a new MMC custom command request for a host, and wait
637 * for the command to complete. Does not attempt to parse the
638 * response.
639 */
640void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
641{
642 __mmc_start_req(host, mrq);
643 mmc_wait_for_req_done(host, mrq);
644}
645EXPORT_SYMBOL(mmc_wait_for_req);
646
647/**
648 * mmc_interrupt_hpi - issue a High Priority Interrupt (HPI)
649 * @card: the MMC card associated with the HPI transfer
650 *
651 * Issues a High Priority Interrupt and polls the card status
652 * until it leaves the programming state.
653 */
654int mmc_interrupt_hpi(struct mmc_card *card)
655{
656 int err;
657 u32 status;
658 unsigned long prg_wait;
659
660 BUG_ON(!card);
661
662 if (!card->ext_csd.hpi_en) {
663 pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
664 return 1;
665 }
666
667 mmc_claim_host(card->host);
668 err = mmc_send_status(card, &status);
669 if (err) {
670 pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
671 goto out;
672 }
673
674 switch (R1_CURRENT_STATE(status)) {
675 case R1_STATE_IDLE:
676 case R1_STATE_READY:
677 case R1_STATE_STBY:
678 case R1_STATE_TRAN:
679 /*
680 * In idle and transfer states, HPI is not needed and the caller
681 * can issue the next intended command immediately
682 */
683 goto out;
684 case R1_STATE_PRG:
685 break;
686 default:
687 /* In all other states, it's illegal to issue HPI */
688 pr_debug("%s: HPI cannot be sent. Card state=%d\n",
689 mmc_hostname(card->host), R1_CURRENT_STATE(status));
690 err = -EINVAL;
691 goto out;
692 }
693
694 err = mmc_send_hpi_cmd(card, &status);
695 if (err)
696 goto out;
697
698 prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
699 do {
700 err = mmc_send_status(card, &status);
701
702 if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
703 break;
704 if (time_after(jiffies, prg_wait))
705 err = -ETIMEDOUT;
706 } while (!err);
707
708out:
709 mmc_release_host(card->host);
710 return err;
711}
712EXPORT_SYMBOL(mmc_interrupt_hpi);
713
714/**
715 * mmc_wait_for_cmd - start a command and wait for completion
716 * @host: MMC host to start command
717 * @cmd: MMC command to start
718 * @retries: maximum number of retries
719 *
720 * Start a new MMC command for a host, and wait for the command
721 * to complete. Return any error that occurred while the command
722 * was executing. Do not attempt to parse the response.
723 */
724int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
725{
726 struct mmc_request mrq = {NULL};
727
728 WARN_ON(!host->claimed);
729
730 memset(cmd->resp, 0, sizeof(cmd->resp));
731 cmd->retries = retries;
732
733 mrq.cmd = cmd;
734 cmd->data = NULL;
735
736 mmc_wait_for_req(host, &mrq);
737
738 return cmd->error;
739}
740
741EXPORT_SYMBOL(mmc_wait_for_cmd);
742
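/*
 * Illustrative sketch (editorial example, not part of this file): issuing a
 * single SEND_STATUS command with mmc_wait_for_cmd(). The core already
 * provides mmc_send_status() for this; the sketch only shows the calling
 * convention and assumes the caller has claimed the host.
 */
#if 0
static int example_read_status(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SEND_STATUS;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 3);	/* up to 3 retries */
	if (err)
		return err;

	*status = cmd.resp[0];
	return 0;
}
#endif
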
743/**
744 * mmc_stop_bkops - stop ongoing BKOPS
745 * @card: MMC card to check BKOPS
746 *
747 * Send HPI command to stop ongoing background operations to
748 * allow rapid servicing of foreground operations, e.g. read/
749 * writes. Wait until the card comes out of the programming state
750 * to avoid errors in servicing read/write requests.
751 */
752int mmc_stop_bkops(struct mmc_card *card)
753{
754 int err = 0;
755
756 BUG_ON(!card);
757 err = mmc_interrupt_hpi(card);
758
759 /*
760 * If err is -EINVAL, an HPI could not be issued in the card's
761 * current state, so consider the BKOPS finished and clear the flag.
762 */
763 if (!err || (err == -EINVAL)) {
764 mmc_card_clr_doing_bkops(card);
765 mmc_retune_release(card->host);
766 err = 0;
767 }
768
769 return err;
770}
771EXPORT_SYMBOL(mmc_stop_bkops);
772
773int mmc_read_bkops_status(struct mmc_card *card)
774{
775 int err;
776 u8 *ext_csd;
777
778 mmc_claim_host(card->host);
779 err = mmc_get_ext_csd(card, &ext_csd);
780 mmc_release_host(card->host);
781 if (err)
782 return err;
783
784 card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
785 card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
786 kfree(ext_csd);
787 return 0;
788}
789EXPORT_SYMBOL(mmc_read_bkops_status);
790
791/**
792 * mmc_set_data_timeout - set the timeout for a data command
793 * @data: data phase for command
794 * @card: the MMC card associated with the data transfer
795 *
796 * Computes the data timeout parameters according to the
797 * correct algorithm given the card type.
798 */
799void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
800{
801 unsigned int mult;
802
803 /*
804 * SDIO cards only define an upper 1 s limit on access.
805 */
806 if (mmc_card_sdio(card)) {
807 data->timeout_ns = 1000000000;
808 data->timeout_clks = 0;
809 return;
810 }
811
812 /*
813 * SD cards use a 100 multiplier rather than 10
814 */
815 mult = mmc_card_sd(card) ? 100 : 10;
816
817 /*
818 * Scale up the multiplier (and therefore the timeout) by
819 * the r2w factor for writes.
820 */
821 if (data->flags & MMC_DATA_WRITE)
822 mult <<= card->csd.r2w_factor;
823
824 data->timeout_ns = card->csd.tacc_ns * mult;
825 data->timeout_clks = card->csd.tacc_clks * mult;
826
827 /*
828 * SD cards also have an upper limit on the timeout.
829 */
830 if (mmc_card_sd(card)) {
831 unsigned int timeout_us, limit_us;
832
833 timeout_us = data->timeout_ns / 1000;
834 if (mmc_host_clk_rate(card->host))
835 timeout_us += data->timeout_clks * 1000 /
836 (mmc_host_clk_rate(card->host) / 1000);
837
838 if (data->flags & MMC_DATA_WRITE)
839 /*
840 * The MMC spec says: "It is strongly recommended
841 * for hosts to implement more than 500ms
842 * timeout value even if the card indicates
843 * the 250ms maximum busy length." Even the
844 * previous value of 300ms is known to be
845 * insufficient for some cards.
846 */
847 limit_us = 3000000;
848 else
849 limit_us = 100000;
850
851 /*
852 * SDHC cards always use these fixed values.
853 */
854 if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
855 data->timeout_ns = limit_us * 1000;
856 data->timeout_clks = 0;
857 }
858
859 /* assign limit value if invalid */
860 if (timeout_us == 0)
861 data->timeout_ns = limit_us * 1000;
862 }
863
864 /*
865 * Some cards require longer data read timeout than indicated in CSD.
866 * Address this by setting the read timeout to a "reasonably high"
867 * value. For the cards tested, 300ms has proven enough. If necessary,
868 * this value can be increased if other problematic cards require this.
869 */
870 if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
871 data->timeout_ns = 300000000;
872 data->timeout_clks = 0;
873 }
874
875 /*
876 * Some cards need very high timeouts if driven in SPI mode.
877 * The worst observed timeout was 900ms after writing a
878 * continuous stream of data until the internal logic
879 * overflowed.
880 */
881 if (mmc_host_is_spi(card->host)) {
882 if (data->flags & MMC_DATA_WRITE) {
883 if (data->timeout_ns < 1000000000)
884 data->timeout_ns = 1000000000; /* 1s */
885 } else {
886 if (data->timeout_ns < 100000000)
887 data->timeout_ns = 100000000; /* 100ms */
888 }
889 }
890}
891EXPORT_SYMBOL(mmc_set_data_timeout);
892
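/*
 * Worked example (editorial note, purely illustrative numbers): for an SD
 * card write with tacc_ns = 1500000, tacc_clks = 0 and r2w_factor = 2,
 * mult = 100 << 2 = 400, so timeout_ns = 1500000 * 400 = 600000000 (600 ms).
 * That is below the 3 s write limit and the card is not block-addressed,
 * so the 600 ms value is kept.
 */
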
893/**
894 * mmc_align_data_size - pads a transfer size to a more optimal value
895 * @card: the MMC card associated with the data transfer
896 * @sz: original transfer size
897 *
898 * Pads the original data size with a number of extra bytes in
899 * order to avoid controller bugs and/or performance hits
900 * (e.g. some controllers revert to PIO for certain sizes).
901 *
902 * Returns the improved size, which might be unmodified.
903 *
904 * Note that this function is only relevant when issuing a
905 * single scatter gather entry.
906 */
907unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
908{
909 /*
910 * FIXME: We don't have a system for the controller to tell
911 * the core about its problems yet, so for now we just 32-bit
912 * align the size.
913 */
914 sz = ((sz + 3) / 4) * 4;
915
916 return sz;
917}
918EXPORT_SYMBOL(mmc_align_data_size);
919
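/*
 * Worked example (editorial note): a 13-byte transfer is padded to
 * ((13 + 3) / 4) * 4 = 16 bytes, while an already aligned 512-byte
 * transfer is returned unchanged.
 */
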
920/**
921 * __mmc_claim_host - exclusively claim a host
922 * @host: mmc host to claim
923 * @abort: whether or not the operation should be aborted
924 *
925 * Claim a host for a set of operations. If @abort is non-null and
926 * dereferences to a non-zero value, then this will return prematurely with
927 * that non-zero value without acquiring the lock. Returns zero
928 * with the lock held otherwise.
929 */
930int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
931{
932 DECLARE_WAITQUEUE(wait, current);
933 unsigned long flags;
934 int stop;
935 bool pm = false;
936
937 might_sleep();
938
939 add_wait_queue(&host->wq, &wait);
940 spin_lock_irqsave(&host->lock, flags);
941 while (1) {
942 set_current_state(TASK_UNINTERRUPTIBLE);
943 stop = abort ? atomic_read(abort) : 0;
944 if (stop || !host->claimed || host->claimer == current)
945 break;
946 spin_unlock_irqrestore(&host->lock, flags);
947 schedule();
948 spin_lock_irqsave(&host->lock, flags);
949 }
950 set_current_state(TASK_RUNNING);
951 if (!stop) {
952 host->claimed = 1;
953 host->claimer = current;
954 host->claim_cnt += 1;
955 if (host->claim_cnt == 1)
956 pm = true;
957 } else
958 wake_up(&host->wq);
959 spin_unlock_irqrestore(&host->lock, flags);
960 remove_wait_queue(&host->wq, &wait);
961
962 if (pm)
963 pm_runtime_get_sync(mmc_dev(host));
964
965 return stop;
966}
967EXPORT_SYMBOL(__mmc_claim_host);
968
969/**
970 * mmc_release_host - release a host
971 * @host: mmc host to release
972 *
973 * Release a MMC host, allowing others to claim the host
974 * for their operations.
975 */
976void mmc_release_host(struct mmc_host *host)
977{
978 unsigned long flags;
979
980 WARN_ON(!host->claimed);
981
982 spin_lock_irqsave(&host->lock, flags);
983 if (--host->claim_cnt) {
984 /* Release for nested claim */
985 spin_unlock_irqrestore(&host->lock, flags);
986 } else {
987 host->claimed = 0;
988 host->claimer = NULL;
989 spin_unlock_irqrestore(&host->lock, flags);
990 wake_up(&host->wq);
991 pm_runtime_mark_last_busy(mmc_dev(host));
992 pm_runtime_put_autosuspend(mmc_dev(host));
993 }
994}
995EXPORT_SYMBOL(mmc_release_host);
996
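/*
 * Illustrative sketch (editorial example, not part of this file): the usual
 * calling pattern around a sequence of card operations. mmc_claim_host() is
 * the abort-less wrapper around __mmc_claim_host(); example_talk_to_card()
 * is a hypothetical command sequence.
 */
#if 0
static int example_do_card_ops(struct mmc_card *card)
{
	int err;

	mmc_claim_host(card->host);		/* exclusive access, may sleep */
	err = example_talk_to_card(card);	/* hypothetical */
	mmc_release_host(card->host);

	return err;
}
#endif
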
997/*
998 * This is a helper function, which fetches a runtime pm reference for the
999 * card device and also claims the host.
1000 */
1001void mmc_get_card(struct mmc_card *card)
1002{
1003 pm_runtime_get_sync(&card->dev);
1004 mmc_claim_host(card->host);
1005}
1006EXPORT_SYMBOL(mmc_get_card);
1007
1008/*
1009 * This is a helper function, which releases the host and drops the runtime
1010 * pm reference for the card device.
1011 */
1012void mmc_put_card(struct mmc_card *card)
1013{
1014 mmc_release_host(card->host);
1015 pm_runtime_mark_last_busy(&card->dev);
1016 pm_runtime_put_autosuspend(&card->dev);
1017}
1018EXPORT_SYMBOL(mmc_put_card);
1019
1020/*
1021 * Internal function that does the actual ios call to the host driver,
1022 * optionally printing some debug output.
1023 */
1024static inline void mmc_set_ios(struct mmc_host *host)
1025{
1026 struct mmc_ios *ios = &host->ios;
1027
1028 pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
1029 "width %u timing %u\n",
1030 mmc_hostname(host), ios->clock, ios->bus_mode,
1031 ios->power_mode, ios->chip_select, ios->vdd,
1032 ios->bus_width, ios->timing);
1033
1034 if (ios->clock > 0)
1035 mmc_set_ungated(host);
1036 host->ops->set_ios(host, ios);
1037}
1038
1039/*
1040 * Control chip select pin on a host.
1041 */
1042void mmc_set_chip_select(struct mmc_host *host, int mode)
1043{
1044 mmc_host_clk_hold(host);
1045 host->ios.chip_select = mode;
1046 mmc_set_ios(host);
1047 mmc_host_clk_release(host);
1048}
1049
1050/*
1051 * Sets the host clock to the highest possible frequency that
1052 * is below "hz".
1053 */
1054static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
1055{
1056 WARN_ON(hz && hz < host->f_min);
1057
1058 if (hz > host->f_max)
1059 hz = host->f_max;
1060
1061 host->ios.clock = hz;
1062 mmc_set_ios(host);
1063}
1064
1065void mmc_set_clock(struct mmc_host *host, unsigned int hz)
1066{
1067 mmc_host_clk_hold(host);
1068 __mmc_set_clock(host, hz);
1069 mmc_host_clk_release(host);
1070}
1071
1072#ifdef CONFIG_MMC_CLKGATE
1073/*
1074 * This gates the clock by setting it to 0 Hz.
1075 */
1076void mmc_gate_clock(struct mmc_host *host)
1077{
1078 unsigned long flags;
1079
1080 spin_lock_irqsave(&host->clk_lock, flags);
1081 host->clk_old = host->ios.clock;
1082 host->ios.clock = 0;
1083 host->clk_gated = true;
1084 spin_unlock_irqrestore(&host->clk_lock, flags);
1085 mmc_set_ios(host);
1086}
1087
1088/*
1089 * This restores the clock from gating by using the cached
1090 * clock value.
1091 */
1092void mmc_ungate_clock(struct mmc_host *host)
1093{
1094 /*
1095 * We should previously have gated the clock, so the clock shall
1096 * be 0 here! The clock may however be 0 during initialization,
1097 * when some request operations are performed before setting
1098 * the frequency. When ungate is requested in that situation
1099 * we just ignore the call.
1100 */
1101 if (host->clk_old) {
1102 BUG_ON(host->ios.clock);
1103 /* This call will also set host->clk_gated to false */
1104 __mmc_set_clock(host, host->clk_old);
1105 }
1106}
1107
1108void mmc_set_ungated(struct mmc_host *host)
1109{
1110 unsigned long flags;
1111
1112 /*
1113 * We've been given a new frequency while the clock is gated,
1114 * so make sure we regard this as ungating it.
1115 */
1116 spin_lock_irqsave(&host->clk_lock, flags);
1117 host->clk_gated = false;
1118 spin_unlock_irqrestore(&host->clk_lock, flags);
1119}
1120
1121#else
1122void mmc_set_ungated(struct mmc_host *host)
1123{
1124}
1125#endif
1126
1127int mmc_execute_tuning(struct mmc_card *card)
1128{
1129 struct mmc_host *host = card->host;
1130 u32 opcode;
1131 int err;
1132
1133 if (!host->ops->execute_tuning)
1134 return 0;
1135
1136 if (mmc_card_mmc(card))
1137 opcode = MMC_SEND_TUNING_BLOCK_HS200;
1138 else
1139 opcode = MMC_SEND_TUNING_BLOCK;
1140
1141 mmc_host_clk_hold(host);
1142 err = host->ops->execute_tuning(host, opcode);
1143 mmc_host_clk_release(host);
1144
1145 if (err)
1146 pr_err("%s: tuning execution failed\n", mmc_hostname(host));
1147 else
1148 mmc_retune_enable(host);
1149
1150 return err;
1151}
1152
1153/*
1154 * Change the bus mode (open drain/push-pull) of a host.
1155 */
1156void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
1157{
1158 mmc_host_clk_hold(host);
1159 host->ios.bus_mode = mode;
1160 mmc_set_ios(host);
1161 mmc_host_clk_release(host);
1162}
1163
1164/*
1165 * Change data bus width of a host.
1166 */
1167void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
1168{
1169 mmc_host_clk_hold(host);
1170 host->ios.bus_width = width;
1171 mmc_set_ios(host);
1172 mmc_host_clk_release(host);
1173}
1174
1175/*
1176 * Set initial state after a power cycle or a hw_reset.
1177 */
1178void mmc_set_initial_state(struct mmc_host *host)
1179{
1180 mmc_retune_disable(host);
1181
1182 if (mmc_host_is_spi(host))
1183 host->ios.chip_select = MMC_CS_HIGH;
1184 else
1185 host->ios.chip_select = MMC_CS_DONTCARE;
1186 host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
1187 host->ios.bus_width = MMC_BUS_WIDTH_1;
1188 host->ios.timing = MMC_TIMING_LEGACY;
1189 host->ios.drv_type = 0;
1190
1191 mmc_set_ios(host);
1192}
1193
1194/**
1195 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
1196 * @vdd: voltage (mV)
1197 * @low_bits: prefer low bits in boundary cases
1198 *
1199 * This function returns the OCR bit number according to the provided @vdd
1200 * value. If conversion is not possible, a negative errno value is returned.
1201 *
1202 * Depending on the @low_bits flag the function prefers low or high OCR bits
1203 * on boundary voltages. For example,
1204 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
1205 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
1206 *
1207 * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21).
1208 */
1209static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
1210{
1211 const int max_bit = ilog2(MMC_VDD_35_36);
1212 int bit;
1213
1214 if (vdd < 1650 || vdd > 3600)
1215 return -EINVAL;
1216
1217 if (vdd >= 1650 && vdd <= 1950)
1218 return ilog2(MMC_VDD_165_195);
1219
1220 if (low_bits)
1221 vdd -= 1;
1222
1223 /* Base 2000 mV, step 100 mV, bit's base 8. */
1224 bit = (vdd - 2000) / 100 + 8;
1225 if (bit > max_bit)
1226 return max_bit;
1227 return bit;
1228}
1229
1230/**
1231 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
1232 * @vdd_min: minimum voltage value (mV)
1233 * @vdd_max: maximum voltage value (mV)
1234 *
1235 * This function returns the OCR mask bits according to the provided @vdd_min
1236 * and @vdd_max values. If conversion is not possible the function returns 0.
1237 *
1238 * Notes wrt boundary cases:
1239 * This function sets the OCR bits for all boundary voltages, for example
1240 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
1241 * MMC_VDD_34_35 mask.
1242 */
1243u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
1244{
1245 u32 mask = 0;
1246
1247 if (vdd_max < vdd_min)
1248 return 0;
1249
1250 /* Prefer high bits for the boundary vdd_max values. */
1251 vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
1252 if (vdd_max < 0)
1253 return 0;
1254
1255 /* Prefer low bits for the boundary vdd_min values. */
1256 vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
1257 if (vdd_min < 0)
1258 return 0;
1259
1260 /* Fill the mask, from max bit to min bit. */
1261 while (vdd_max >= vdd_min)
1262 mask |= 1 << vdd_max--;
1263
1264 return mask;
1265}
1266EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
1267
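/*
 * Worked example (editorial note): mmc_vddrange_to_ocrmask(3300, 3400) maps
 * 3400 mV to bit 22 (MMC_VDD_34_35) and 3300 mV (minus 1 for the low
 * boundary) to bit 20 (MMC_VDD_32_33), so it returns
 * MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35.
 */
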
1268#ifdef CONFIG_OF
1269
1270/**
1271 * mmc_of_parse_voltage - return mask of supported voltages
1272 * @np: The device node to be parsed.
1273 * @mask: mask of voltages available for MMC/SD/SDIO
1274 *
1275 * Returns zero on success, or a negative errno if the voltage-range
1276 * property is invalid.
1277 */
1278int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
1279{
1280 const u32 *voltage_ranges;
1281 int num_ranges, i;
1282
1283 voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
1284 num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
1285 if (!voltage_ranges || !num_ranges) {
1286 pr_info("%s: voltage-ranges unspecified\n", np->full_name);
1287 return -EINVAL;
1288 }
1289
1290 for (i = 0; i < num_ranges; i++) {
1291 const int j = i * 2;
1292 u32 ocr_mask;
1293
1294 ocr_mask = mmc_vddrange_to_ocrmask(
1295 be32_to_cpu(voltage_ranges[j]),
1296 be32_to_cpu(voltage_ranges[j + 1]));
1297 if (!ocr_mask) {
1298 pr_err("%s: voltage-range #%d is invalid\n",
1299 np->full_name, i);
1300 return -EINVAL;
1301 }
1302 *mask |= ocr_mask;
1303 }
1304
1305 return 0;
1306}
1307EXPORT_SYMBOL(mmc_of_parse_voltage);
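
/*
 * Illustrative device tree snippet (editorial note): each pair in the
 * property is a <min max> range in millivolts, e.g.
 *
 *	voltage-ranges = <3300 3400>;
 *
 * which yields MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35 via
 * mmc_vddrange_to_ocrmask().
 */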
1308
1309#endif /* CONFIG_OF */
1310
1311static int mmc_of_get_func_num(struct device_node *node)
1312{
1313 u32 reg;
1314 int ret;
1315
1316 ret = of_property_read_u32(node, "reg", &reg);
1317 if (ret < 0)
1318 return ret;
1319
1320 return reg;
1321}
1322
1323struct device_node *mmc_of_find_child_device(struct mmc_host *host,
1324 unsigned func_num)
1325{
1326 struct device_node *node;
1327
1328 if (!host->parent || !host->parent->of_node)
1329 return NULL;
1330
1331 for_each_child_of_node(host->parent->of_node, node) {
1332 if (mmc_of_get_func_num(node) == func_num)
1333 return node;
1334 }
1335
1336 return NULL;
1337}
1338
1339#ifdef CONFIG_REGULATOR
1340
1341/**
1342 * mmc_regulator_get_ocrmask - return mask of supported voltages
1343 * @supply: regulator to use
1344 *
1345 * This returns either a negative errno, or a mask of voltages that
1346 * can be provided to MMC/SD/SDIO devices using the specified voltage
1347 * regulator. This would normally be called before registering the
1348 * MMC host adapter.
1349 */
1350int mmc_regulator_get_ocrmask(struct regulator *supply)
1351{
1352 int result = 0;
1353 int count;
1354 int i;
1355 int vdd_uV;
1356 int vdd_mV;
1357
1358 count = regulator_count_voltages(supply);
1359 if (count < 0)
1360 return count;
1361
1362 for (i = 0; i < count; i++) {
1363 vdd_uV = regulator_list_voltage(supply, i);
1364 if (vdd_uV <= 0)
1365 continue;
1366
1367 vdd_mV = vdd_uV / 1000;
1368 result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
1369 }
1370
1371 if (!result) {
1372 vdd_uV = regulator_get_voltage(supply);
1373 if (vdd_uV <= 0)
1374 return vdd_uV;
1375
1376 vdd_mV = vdd_uV / 1000;
1377 result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
1378 }
1379
1380 return result;
1381}
1382EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);
1383
1384/**
1385 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
1386 * @mmc: the host to regulate
1387 * @supply: regulator to use
1388 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
1389 *
1390 * Returns zero on success, else negative errno.
1391 *
1392 * MMC host drivers may use this to enable or disable a regulator using
1393 * a particular supply voltage. This would normally be called from the
1394 * set_ios() method.
1395 */
1396int mmc_regulator_set_ocr(struct mmc_host *mmc,
1397 struct regulator *supply,
1398 unsigned short vdd_bit)
1399{
1400 int result = 0;
1401 int min_uV, max_uV;
1402
1403 if (vdd_bit) {
1404 int tmp;
1405
1406 /*
1407 * REVISIT mmc_vddrange_to_ocrmask() may have set some
1408 * bits this regulator doesn't quite support ... don't
1409 * be too picky, most cards and regulators are OK with
1410 * a 0.1V range goof (it's a small error percentage).
1411 */
1412 tmp = vdd_bit - ilog2(MMC_VDD_165_195);
1413 if (tmp == 0) {
1414 min_uV = 1650 * 1000;
1415 max_uV = 1950 * 1000;
1416 } else {
1417 min_uV = 1900 * 1000 + tmp * 100 * 1000;
1418 max_uV = min_uV + 100 * 1000;
1419 }
1420
1421 result = regulator_set_voltage(supply, min_uV, max_uV);
1422 if (result == 0 && !mmc->regulator_enabled) {
1423 result = regulator_enable(supply);
1424 if (!result)
1425 mmc->regulator_enabled = true;
1426 }
1427 } else if (mmc->regulator_enabled) {
1428 result = regulator_disable(supply);
1429 if (result == 0)
1430 mmc->regulator_enabled = false;
1431 }
1432
1433 if (result)
1434 dev_err(mmc_dev(mmc),
1435 "could not set regulator OCR (%d)\n", result);
1436 return result;
1437}
1438EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
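
/*
 * Worked example (editorial note): for vdd_bit = 20 (MMC_VDD_32_33),
 * tmp = 20 - ilog2(MMC_VDD_165_195) = 20 - 7 = 13, so the regulator is
 * asked for min_uV = 1900000 + 13 * 100000 = 3200000 and
 * max_uV = 3300000, i.e. the 3.2 V - 3.3 V window.
 */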
1439
1440#endif /* CONFIG_REGULATOR */
1441
1442int mmc_regulator_get_supply(struct mmc_host *mmc)
1443{
1444 struct device *dev = mmc_dev(mmc);
1445 int ret;
1446
1447 mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
1448 mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");
1449
1450 if (IS_ERR(mmc->supply.vmmc)) {
1451 if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
1452 return -EPROBE_DEFER;
1453 dev_info(dev, "No vmmc regulator found\n");
1454 } else {
1455 ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
1456 if (ret > 0)
1457 mmc->ocr_avail = ret;
1458 else
1459 dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
1460 }
1461
1462 if (IS_ERR(mmc->supply.vqmmc)) {
1463 if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
1464 return -EPROBE_DEFER;
1465 dev_info(dev, "No vqmmc regulator found\n");
1466 }
1467
1468 return 0;
1469}
1470EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
1471
1472/*
1473 * Mask off any voltages we don't support and select
1474 * the lowest voltage
1475 */
1476u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
1477{
1478 int bit;
1479
1480 /*
1481 * Sanity check the voltages that the card claims to
1482 * support.
1483 */
1484 if (ocr & 0x7F) {
1485 dev_warn(mmc_dev(host),
1486 "card claims to support voltages below defined range\n");
1487 ocr &= ~0x7F;
1488 }
1489
1490 ocr &= host->ocr_avail;
1491 if (!ocr) {
1492 dev_warn(mmc_dev(host), "no support for card's volts\n");
1493 return 0;
1494 }
1495
1496 if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
1497 bit = ffs(ocr) - 1;
1498 ocr &= 3 << bit;
1499 mmc_power_cycle(host, ocr);
1500 } else {
1501 bit = fls(ocr) - 1;
1502 ocr &= 3 << bit;
1503 if (bit != host->ios.vdd)
1504 dev_warn(mmc_dev(host), "exceeding card's volts\n");
1505 }
1506
1507 return ocr;
1508}
1509
1510int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
1511{
1512 int err = 0;
1513 int old_signal_voltage = host->ios.signal_voltage;
1514
1515 host->ios.signal_voltage = signal_voltage;
1516 if (host->ops->start_signal_voltage_switch) {
1517 mmc_host_clk_hold(host);
1518 err = host->ops->start_signal_voltage_switch(host, &host->ios);
1519 mmc_host_clk_release(host);
1520 }
1521
1522 if (err)
1523 host->ios.signal_voltage = old_signal_voltage;
1524
1525 return err;
1526
1527}
1528
1529int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
1530{
1531 struct mmc_command cmd = {0};
1532 int err = 0;
1533 u32 clock;
1534
1535 BUG_ON(!host);
1536
1537 /*
1538 * Send CMD11 only if the request is to switch the card to
1539 * 1.8V signalling.
1540 */
1541 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
1542 return __mmc_set_signal_voltage(host, signal_voltage);
1543
1544 /*
1545 * If we cannot switch voltages, return failure so the caller
1546 * can continue without UHS mode
1547 */
1548 if (!host->ops->start_signal_voltage_switch)
1549 return -EPERM;
1550 if (!host->ops->card_busy)
1551 pr_warn("%s: cannot verify signal voltage switch\n",
1552 mmc_hostname(host));
1553
1554 mmc_host_clk_hold(host);
1555
1556 cmd.opcode = SD_SWITCH_VOLTAGE;
1557 cmd.arg = 0;
1558 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1559
1560 err = mmc_wait_for_cmd(host, &cmd, 0);
1561 if (err)
1562 goto err_command;
1563
1564 if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) {
1565 err = -EIO;
1566 goto err_command;
1567 }
1568 /*
1569 * The card should drive cmd and dat[0:3] low immediately
1570 * after the response of cmd11, but wait 1 ms to be sure
1571 */
1572 mmc_delay(1);
1573 if (host->ops->card_busy && !host->ops->card_busy(host)) {
1574 err = -EAGAIN;
1575 goto power_cycle;
1576 }
1577 /*
1578 * During a signal voltage level switch, the clock must be gated
1579 * for 5 ms according to the SD spec
1580 */
1581 clock = host->ios.clock;
1582 host->ios.clock = 0;
1583 mmc_set_ios(host);
1584
1585 if (__mmc_set_signal_voltage(host, signal_voltage)) {
1586 /*
1587 * Voltages may not have been switched, but we've already
1588 * sent CMD11, so a power cycle is required anyway
1589 */
1590 err = -EAGAIN;
1591 goto power_cycle;
1592 }
1593
1594 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
1595 mmc_delay(10);
1596 host->ios.clock = clock;
1597 mmc_set_ios(host);
1598
1599 /* Wait for at least 1 ms according to spec */
1600 mmc_delay(1);
1601
1602 /*
1603 * Failure to switch is indicated by the card holding
1604 * dat[0:3] low
1605 */
1606 if (host->ops->card_busy && host->ops->card_busy(host))
1607 err = -EAGAIN;
1608
1609power_cycle:
1610 if (err) {
1611 pr_debug("%s: Signal voltage switch failed, "
1612 "power cycling card\n", mmc_hostname(host));
1613 mmc_power_cycle(host, ocr);
1614 }
1615
1616err_command:
1617 mmc_host_clk_release(host);
1618
1619 return err;
1620}
1621
1622/*
1623 * Select timing parameters for host.
1624 */
1625void mmc_set_timing(struct mmc_host *host, unsigned int timing)
1626{
1627 mmc_host_clk_hold(host);
1628 host->ios.timing = timing;
1629 mmc_set_ios(host);
1630 mmc_host_clk_release(host);
1631}
1632
1633/*
1634 * Select appropriate driver type for host.
1635 */
1636void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
1637{
1638 mmc_host_clk_hold(host);
1639 host->ios.drv_type = drv_type;
1640 mmc_set_ios(host);
1641 mmc_host_clk_release(host);
1642}
1643
1644int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
1645 int card_drv_type, int *drv_type)
1646{
1647 struct mmc_host *host = card->host;
1648 int host_drv_type = SD_DRIVER_TYPE_B;
1649 int drive_strength;
1650
1651 *drv_type = 0;
1652
1653 if (!host->ops->select_drive_strength)
1654 return 0;
1655
1656 /* Use SD definition of driver strength for hosts */
1657 if (host->caps & MMC_CAP_DRIVER_TYPE_A)
1658 host_drv_type |= SD_DRIVER_TYPE_A;
1659
1660 if (host->caps & MMC_CAP_DRIVER_TYPE_C)
1661 host_drv_type |= SD_DRIVER_TYPE_C;
1662
1663 if (host->caps & MMC_CAP_DRIVER_TYPE_D)
1664 host_drv_type |= SD_DRIVER_TYPE_D;
1665
1666 /*
1667 * The drive strength that the hardware can support
1668 * depends on the board design. Pass the appropriate
1669 * information and let the hardware specific code
1670 * return what is possible given the options
1671 */
1672 mmc_host_clk_hold(host);
1673 drive_strength = host->ops->select_drive_strength(card, max_dtr,
1674 host_drv_type,
1675 card_drv_type,
1676 drv_type);
1677 mmc_host_clk_release(host);
1678
1679 return drive_strength;
1680}
1681
1682/*
1683 * Apply power to the MMC stack. This is a two-stage process.
1684 * First, we enable power to the card without the clock running.
1685 * We then wait a bit for the power to stabilise. Finally,
1686 * enable the bus drivers and clock to the card.
1687 *
1688 * We must _NOT_ enable the clock prior to power stabilising.
1689 *
1690 * If a host does all the power sequencing itself, ignore the
1691 * initial MMC_POWER_UP stage.
1692 */
1693void mmc_power_up(struct mmc_host *host, u32 ocr)
1694{
1695 if (host->ios.power_mode == MMC_POWER_ON)
1696 return;
1697
1698 mmc_host_clk_hold(host);
1699
1700 mmc_pwrseq_pre_power_on(host);
1701
1702 host->ios.vdd = fls(ocr) - 1;
1703 host->ios.power_mode = MMC_POWER_UP;
1704 /* Set initial state and call mmc_set_ios */
1705 mmc_set_initial_state(host);
1706
1707 /* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */
1708 if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330) == 0)
1709 dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
1710 else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180) == 0)
1711 dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
1712 else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120) == 0)
1713 dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");
1714
1715 /*
1716 * This delay should be sufficient to allow the power supply
1717 * to reach the minimum voltage.
1718 */
1719 mmc_delay(10);
1720
1721 mmc_pwrseq_post_power_on(host);
1722
1723 host->ios.clock = host->f_init;
1724
1725 host->ios.power_mode = MMC_POWER_ON;
1726 mmc_set_ios(host);
1727
1728 /*
1729 * This delay must be at least 74 clock cycles, or 1 ms, or the
1730 * time required to reach a stable voltage.
1731 */
1732 mmc_delay(10);
1733
1734 mmc_host_clk_release(host);
1735}
1736
1737void mmc_power_off(struct mmc_host *host)
1738{
1739 if (host->ios.power_mode == MMC_POWER_OFF)
1740 return;
1741
1742 mmc_host_clk_hold(host);
1743
1744 mmc_pwrseq_power_off(host);
1745
1746 host->ios.clock = 0;
1747 host->ios.vdd = 0;
1748
1749 host->ios.power_mode = MMC_POWER_OFF;
1750 /* Set initial state and call mmc_set_ios */
1751 mmc_set_initial_state(host);
1752
1753 /*
1754 * Some configurations, such as the 802.11 SDIO card in the OLPC
1755 * XO-1.5, require a short delay after poweroff before the card
1756 * can be successfully turned on again.
1757 */
1758 mmc_delay(1);
1759
1760 mmc_host_clk_release(host);
1761}
1762
1763void mmc_power_cycle(struct mmc_host *host, u32 ocr)
1764{
1765 mmc_power_off(host);
1766 /* Wait at least 1 ms according to SD spec */
1767 mmc_delay(1);
1768 mmc_power_up(host, ocr);
1769}
1770
1771/*
1772 * Cleanup when the last reference to the bus operator is dropped.
1773 */
1774static void __mmc_release_bus(struct mmc_host *host)
1775{
1776 BUG_ON(!host);
1777 BUG_ON(host->bus_refs);
1778 BUG_ON(!host->bus_dead);
1779
1780 host->bus_ops = NULL;
1781}
1782
1783/*
1784 * Increase reference count of bus operator
1785 */
1786static inline void mmc_bus_get(struct mmc_host *host)
1787{
1788 unsigned long flags;
1789
1790 spin_lock_irqsave(&host->lock, flags);
1791 host->bus_refs++;
1792 spin_unlock_irqrestore(&host->lock, flags);
1793}
1794
1795/*
1796 * Decrease reference count of bus operator and free it if
1797 * it is the last reference.
1798 */
1799static inline void mmc_bus_put(struct mmc_host *host)
1800{
1801 unsigned long flags;
1802
1803 spin_lock_irqsave(&host->lock, flags);
1804 host->bus_refs--;
1805 if ((host->bus_refs == 0) && host->bus_ops)
1806 __mmc_release_bus(host);
1807 spin_unlock_irqrestore(&host->lock, flags);
1808}
1809
1810/*
1811 * Assign a mmc bus handler to a host. Only one bus handler may control a
1812 * host at any given time.
1813 */
1814void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
1815{
1816 unsigned long flags;
1817
1818 BUG_ON(!host);
1819 BUG_ON(!ops);
1820
1821 WARN_ON(!host->claimed);
1822
1823 spin_lock_irqsave(&host->lock, flags);
1824
1825 BUG_ON(host->bus_ops);
1826 BUG_ON(host->bus_refs);
1827
1828 host->bus_ops = ops;
1829 host->bus_refs = 1;
1830 host->bus_dead = 0;
1831
1832 spin_unlock_irqrestore(&host->lock, flags);
1833}
1834
1835/*
1836 * Remove the current bus handler from a host.
1837 */
1838void mmc_detach_bus(struct mmc_host *host)
1839{
1840 unsigned long flags;
1841
1842 BUG_ON(!host);
1843
1844 WARN_ON(!host->claimed);
1845 WARN_ON(!host->bus_ops);
1846
1847 spin_lock_irqsave(&host->lock, flags);
1848
1849 host->bus_dead = 1;
1850
1851 spin_unlock_irqrestore(&host->lock, flags);
1852
1853 mmc_bus_put(host);
1854}
1855
1856static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
1857 bool cd_irq)
1858{
1859#ifdef CONFIG_MMC_DEBUG
1860 unsigned long flags;
1861 spin_lock_irqsave(&host->lock, flags);
1862 WARN_ON(host->removed);
1863 spin_unlock_irqrestore(&host->lock, flags);
1864#endif
1865
1866 /*
1867 * If the device is configured as a wakeup source, we prevent a new sleep
1868 * for 5 s to give user space a chance to consume the event.
1869 */
1870 if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) &&
1871 device_can_wakeup(mmc_dev(host)))
1872 pm_wakeup_event(mmc_dev(host), 5000);
1873
1874 host->detect_change = 1;
1875 mmc_schedule_delayed_work(&host->detect, delay);
1876}
1877
1878/**
1879 * mmc_detect_change - process change of state on a MMC socket
1880 * @host: host which changed state.
1881 * @delay: optional delay to wait before detection (jiffies)
1882 *
1883 * MMC drivers should call this when they detect a card has been
1884 * inserted or removed. The MMC layer will confirm that any
1885 * present card is still functional, and initialize any newly
1886 * inserted card.
1887 */
1888void mmc_detect_change(struct mmc_host *host, unsigned long delay)
1889{
1890 _mmc_detect_change(host, delay, true);
1891}
1892EXPORT_SYMBOL(mmc_detect_change);
1893
1894void mmc_init_erase(struct mmc_card *card)
1895{
1896 unsigned int sz;
1897
1898 if (is_power_of_2(card->erase_size))
1899 card->erase_shift = ffs(card->erase_size) - 1;
1900 else
1901 card->erase_shift = 0;
1902
1903 /*
1904 * It is possible to erase an arbitrarily large area of an SD or MMC
1905 * card. That is not desirable because it can take a long time
1906 * (minutes) potentially delaying more important I/O, and also the
1907 * timeout calculations become increasingly over-estimated.
1908 * Consequently, 'pref_erase' is defined as a guide to limit erases
1909 * to that size and alignment.
1910 *
1911 * For SD cards that define Allocation Unit size, limit erases to one
1912 * Allocation Unit at a time. For MMC cards that define High Capacity
1913 * Erase Size, whether it is switched on or not, limit to that size.
1914 * Otherwise just have a stab at a good value. For modern cards it
1915 * will end up being 4MiB. Note that if the value is too small, it
1916 * can end up taking longer to erase.
1917 */
1918 if (mmc_card_sd(card) && card->ssr.au) {
1919 card->pref_erase = card->ssr.au;
1920 card->erase_shift = ffs(card->ssr.au) - 1;
1921 } else if (card->ext_csd.hc_erase_size) {
1922 card->pref_erase = card->ext_csd.hc_erase_size;
1923 } else if (card->erase_size) {
1924 sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
1925 if (sz < 128)
1926 card->pref_erase = 512 * 1024 / 512;
1927 else if (sz < 512)
1928 card->pref_erase = 1024 * 1024 / 512;
1929 else if (sz < 1024)
1930 card->pref_erase = 2 * 1024 * 1024 / 512;
1931 else
1932 card->pref_erase = 4 * 1024 * 1024 / 512;
1933 if (card->pref_erase < card->erase_size)
1934 card->pref_erase = card->erase_size;
1935 else {
1936 sz = card->pref_erase % card->erase_size;
1937 if (sz)
1938 card->pref_erase += card->erase_size - sz;
1939 }
1940 } else
1941 card->pref_erase = 0;
1942}
1943
1944static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
1945 unsigned int arg, unsigned int qty)
1946{
1947 unsigned int erase_timeout;
1948
1949 if (arg == MMC_DISCARD_ARG ||
1950 (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
1951 erase_timeout = card->ext_csd.trim_timeout;
1952 } else if (card->ext_csd.erase_group_def & 1) {
1953 /* High Capacity Erase Group Size uses HC timeouts */
1954 if (arg == MMC_TRIM_ARG)
1955 erase_timeout = card->ext_csd.trim_timeout;
1956 else
1957 erase_timeout = card->ext_csd.hc_erase_timeout;
1958 } else {
1959 /* CSD Erase Group Size uses write timeout */
1960 unsigned int mult = (10 << card->csd.r2w_factor);
1961 unsigned int timeout_clks = card->csd.tacc_clks * mult;
1962 unsigned int timeout_us;
1963
1964 /* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
1965 if (card->csd.tacc_ns < 1000000)
1966 timeout_us = (card->csd.tacc_ns * mult) / 1000;
1967 else
1968 timeout_us = (card->csd.tacc_ns / 1000) * mult;
1969
1970 /*
1971 * ios.clock is only a target. The real clock rate might be
1972 * less but not that much less, so fudge it by multiplying by 2.
1973 */
1974 timeout_clks <<= 1;
1975 timeout_us += (timeout_clks * 1000) /
1976 (mmc_host_clk_rate(card->host) / 1000);
1977
1978 erase_timeout = timeout_us / 1000;
1979
1980 /*
1981 * Theoretically, the calculation could underflow so round up
1982 * to 1ms in that case.
1983 */
1984 if (!erase_timeout)
1985 erase_timeout = 1;
1986 }
1987
1988 /* Multiplier for secure operations */
1989 if (arg & MMC_SECURE_ARGS) {
1990 if (arg == MMC_SECURE_ERASE_ARG)
1991 erase_timeout *= card->ext_csd.sec_erase_mult;
1992 else
1993 erase_timeout *= card->ext_csd.sec_trim_mult;
1994 }
1995
1996 erase_timeout *= qty;
1997
1998 /*
1999 * Ensure at least a 1 second timeout for SPI as per
2000 * 'mmc_set_data_timeout()'
2001 */
2002 if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
2003 erase_timeout = 1000;
2004
2005 return erase_timeout;
2006}
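
/*
 * Worked example (editorial note, illustrative numbers): for a CSD-sized
 * erase group with r2w_factor = 4, tacc_ns = 5000000 and tacc_clks = 0,
 * mult = 10 << 4 = 160 and timeout_us = (5000000 / 1000) * 160 = 800000,
 * i.e. roughly 800 ms per erase group before the qty and secure-operation
 * multipliers are applied.
 */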
2007
2008static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
2009 unsigned int arg,
2010 unsigned int qty)
2011{
2012 unsigned int erase_timeout;
2013
2014 if (card->ssr.erase_timeout) {
2015 /* Erase timeout specified in SD Status Register (SSR) */
2016 erase_timeout = card->ssr.erase_timeout * qty +
2017 card->ssr.erase_offset;
2018 } else {
2019 /*
2020 * Erase timeout not specified in SD Status Register (SSR) so
2021 * use 250ms per write block.
2022 */
2023 erase_timeout = 250 * qty;
2024 }
2025
2026 /* Must not be less than 1 second */
2027 if (erase_timeout < 1000)
2028 erase_timeout = 1000;
2029
2030 return erase_timeout;
2031}
2032
2033static unsigned int mmc_erase_timeout(struct mmc_card *card,
2034 unsigned int arg,
2035 unsigned int qty)
2036{
2037 if (mmc_card_sd(card))
2038 return mmc_sd_erase_timeout(card, arg, qty);
2039 else
2040 return mmc_mmc_erase_timeout(card, arg, qty);
2041}
2042
2043static int mmc_do_erase(struct mmc_card *card, unsigned int from,
2044 unsigned int to, unsigned int arg)
2045{
2046 struct mmc_command cmd = {0};
2047 unsigned int qty = 0;
2048 unsigned long timeout;
2049 int err;
2050
2051 mmc_retune_hold(card->host);
2052
2053 /*
2054 * qty is used to calculate the erase timeout which depends on how many
2055 * erase groups (or allocation units in SD terminology) are affected.
2056 * We count erasing part of an erase group as one erase group.
2057 * For SD, the allocation units are always a power of 2. For MMC, the
2058	 * erase group size is almost certainly also a power of 2, but the JEDEC
2059	 * standard does not seem to insist on that, so we fall back to
2060 * division in that case. SD may not specify an allocation unit size,
2061 * in which case the timeout is based on the number of write blocks.
2062 *
2063 * Note that the timeout for secure trim 2 will only be correct if the
2064 * number of erase groups specified is the same as the total of all
2065 * preceding secure trim 1 commands. Since the power may have been
2066 * lost since the secure trim 1 commands occurred, it is generally
2067 * impossible to calculate the secure trim 2 timeout correctly.
2068 */
2069 if (card->erase_shift)
2070 qty += ((to >> card->erase_shift) -
2071 (from >> card->erase_shift)) + 1;
2072 else if (mmc_card_sd(card))
2073 qty += to - from + 1;
2074 else
2075 qty += ((to / card->erase_size) -
2076 (from / card->erase_size)) + 1;
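	/*
	 * Illustrative example: with erase_shift = 10 (1024-sector erase
	 * groups), from = 0 and to = 2047 give qty = (2047 >> 10) -
	 * (0 >> 10) + 1 = 2, while a range entirely inside one group, e.g.
	 * from = 100 and to = 200, still counts as qty = 1 even though only
	 * part of that group is touched.
	 */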
2077
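	/*
	 * Standard-capacity cards are byte addressed, so convert the sector
	 * arguments to byte addresses below; high-capacity (block-addressed)
	 * cards take sector numbers directly.
	 */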
2078 if (!mmc_card_blockaddr(card)) {
2079 from <<= 9;
2080 to <<= 9;
2081 }
2082
2083 if (mmc_card_sd(card))
2084 cmd.opcode = SD_ERASE_WR_BLK_START;
2085 else
2086 cmd.opcode = MMC_ERASE_GROUP_START;
2087 cmd.arg = from;
2088 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2089 err = mmc_wait_for_cmd(card->host, &cmd, 0);
2090 if (err) {
2091		pr_err("mmc_erase: group start error %d, status %#x\n",
2092			err, cmd.resp[0]);
2093 err = -EIO;
2094 goto out;
2095 }
2096
2097 memset(&cmd, 0, sizeof(struct mmc_command));
2098 if (mmc_card_sd(card))
2099 cmd.opcode = SD_ERASE_WR_BLK_END;
2100 else
2101 cmd.opcode = MMC_ERASE_GROUP_END;
2102 cmd.arg = to;
2103 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2104 err = mmc_wait_for_cmd(card->host, &cmd, 0);
2105 if (err) {
2106 pr_err("mmc_erase: group end error %d, status %#x\n",
2107 err, cmd.resp[0]);
2108 err = -EIO;
2109 goto out;
2110 }
2111
2112 memset(&cmd, 0, sizeof(struct mmc_command));
2113 cmd.opcode = MMC_ERASE;
2114 cmd.arg = arg;
2115 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
2116 cmd.busy_timeout = mmc_erase_timeout(card, arg, qty);
2117 err = mmc_wait_for_cmd(card->host, &cmd, 0);
2118 if (err) {
2119 pr_err("mmc_erase: erase error %d, status %#x\n",
2120 err, cmd.resp[0]);
2121 err = -EIO;
2122 goto out;
2123 }
2124
2125 if (mmc_host_is_spi(card->host))
2126 goto out;
2127
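	/*
	 * Poll the card with CMD13 (SEND_STATUS) until it reports
	 * READY_FOR_DATA and has left the programming state; any error bit
	 * in the R1 status (mask 0xFDF92000) or the overall timeout below
	 * aborts the wait.
	 */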
2128 timeout = jiffies + msecs_to_jiffies(MMC_CORE_TIMEOUT_MS);
2129 do {
2130 memset(&cmd, 0, sizeof(struct mmc_command));
2131 cmd.opcode = MMC_SEND_STATUS;
2132 cmd.arg = card->rca << 16;
2133 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
2134 /* Do not retry else we can't see errors */
2135 err = mmc_wait_for_cmd(card->host, &cmd, 0);
2136 if (err || (cmd.resp[0] & 0xFDF92000)) {
2137 pr_err("error %d requesting status %#x\n",
2138 err, cmd.resp[0]);
2139 err = -EIO;
2140 goto out;
2141 }
2142
2143 /* Timeout if the device never becomes ready for data and
2144 * never leaves the program state.
2145 */
2146 if (time_after(jiffies, timeout)) {
2147 pr_err("%s: Card stuck in programming state! %s\n",
2148 mmc_hostname(card->host), __func__);
2149 err = -EIO;
2150 goto out;
2151 }
2152
2153 } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
2154 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
2155out:
2156 mmc_retune_release(card->host);
2157 return err;
2158}
2159
2160/**
2161 * mmc_erase - erase sectors.
2162 * @card: card to erase
2163 * @from: first sector to erase
2164 * @nr: number of sectors to erase
2165 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
2166 *
2167 * Caller must claim host before calling this function.
2168 */
2169int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
2170 unsigned int arg)
2171{
2172 unsigned int rem, to = from + nr;
2173 int err;
2174
2175 if (!(card->host->caps & MMC_CAP_ERASE) ||
2176 !(card->csd.cmdclass & CCC_ERASE))
2177 return -EOPNOTSUPP;
2178
2179 if (!card->erase_size)
2180 return -EOPNOTSUPP;
2181
2182 if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
2183 return -EOPNOTSUPP;
2184
2185 if ((arg & MMC_SECURE_ARGS) &&
2186 !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
2187 return -EOPNOTSUPP;
2188
2189 if ((arg & MMC_TRIM_ARGS) &&
2190 !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
2191 return -EOPNOTSUPP;
2192
2193 if (arg == MMC_SECURE_ERASE_ARG) {
2194 if (from % card->erase_size || nr % card->erase_size)
2195 return -EINVAL;
2196 }
2197
2198 if (arg == MMC_ERASE_ARG) {
2199 rem = from % card->erase_size;
2200 if (rem) {
2201 rem = card->erase_size - rem;
2202 from += rem;
2203 if (nr > rem)
2204 nr -= rem;
2205 else
2206 return 0;
2207 }
2208 rem = nr % card->erase_size;
2209 if (rem)
2210 nr -= rem;
2211 }
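	/*
	 * Illustrative example of the adjustment above: with erase_size =
	 * 1024 sectors, from = 100 and nr = 3000, the head is rounded up to
	 * from = 1024 (nr becomes 2076) and the 28-sector tail remainder is
	 * dropped, leaving nr = 2048 so that sectors 1024..3071 are erased.
	 */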
2212
2213 if (nr == 0)
2214 return 0;
2215
2216 to = from + nr;
2217
2218 if (to <= from)
2219 return -EINVAL;
2220
2221 /* 'from' and 'to' are inclusive */
2222 to -= 1;
2223
2224 /*
2225 * Special case where only one erase-group fits in the timeout budget:
2226	 * If the region crosses an erase-group boundary in this particular
2227	 * case, we will be trimming more than one erase-group, which does not
2228 * fit in the timeout budget of the controller, so we need to split it
2229 * and call mmc_do_erase() twice if necessary. This special case is
2230 * identified by the card->eg_boundary flag.
2231 */
2232 rem = card->erase_size - (from % card->erase_size);
2233 if ((arg & MMC_TRIM_ARGS) && (card->eg_boundary) && (nr > rem)) {
2234 err = mmc_do_erase(card, from, from + rem - 1, arg);
2235 from += rem;
2236 if ((err) || (to <= from))
2237 return err;
2238 }
2239
2240 return mmc_do_erase(card, from, to, arg);
2241}
2242EXPORT_SYMBOL(mmc_erase);
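/*
 * Usage sketch (illustrative only; no such caller exists in this file):
 * a block driver that has already claimed the host could discard a sector
 * range along these lines:
 *
 *	if (mmc_can_trim(card))
 *		err = mmc_erase(card, from, nr, MMC_TRIM_ARG);
 *	else
 *		err = mmc_erase(card, from, nr, MMC_ERASE_ARG);
 */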
2243
2244int mmc_can_erase(struct mmc_card *card)
2245{
2246 if ((card->host->caps & MMC_CAP_ERASE) &&
2247 (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
2248 return 1;
2249 return 0;
2250}
2251EXPORT_SYMBOL(mmc_can_erase);
2252
2253int mmc_can_trim(struct mmc_card *card)
2254{
2255 if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) &&
2256 (!(card->quirks & MMC_QUIRK_TRIM_BROKEN)))
2257 return 1;
2258 return 0;
2259}
2260EXPORT_SYMBOL(mmc_can_trim);
2261
2262int mmc_can_discard(struct mmc_card *card)
2263{
2264 /*
2265	 * As there's no way to detect the discard support bit at v4.5,
2266	 * use the s/w feature support field.
2267 */
2268 if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
2269 return 1;
2270 return 0;
2271}
2272EXPORT_SYMBOL(mmc_can_discard);
2273
2274int mmc_can_sanitize(struct mmc_card *card)
2275{
2276 if (!mmc_can_trim(card) && !mmc_can_erase(card))
2277 return 0;
2278 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
2279 return 1;
2280 return 0;
2281}
2282EXPORT_SYMBOL(mmc_can_sanitize);
2283
2284int mmc_can_secure_erase_trim(struct mmc_card *card)
2285{
2286 if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
2287 !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
2288 return 1;
2289 return 0;
2290}
2291EXPORT_SYMBOL(mmc_can_secure_erase_trim);
2292
2293int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
2294 unsigned int nr)
2295{
2296 if (!card->erase_size)
2297 return 0;
2298 if (from % card->erase_size || nr % card->erase_size)
2299 return 0;
2300 return 1;
2301}
2302EXPORT_SYMBOL(mmc_erase_group_aligned);
2303
2304static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
2305 unsigned int arg)
2306{
2307 struct mmc_host *host = card->host;
2308 unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
2309 unsigned int last_timeout = 0;
2310
2311 if (card->erase_shift)
2312 max_qty = UINT_MAX >> card->erase_shift;
2313 else if (mmc_card_sd(card))
2314 max_qty = UINT_MAX;
2315 else
2316 max_qty = UINT_MAX / card->erase_size;
2317
2318 /* Find the largest qty with an OK timeout */
2319 do {
2320 y = 0;
2321 for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
2322 timeout = mmc_erase_timeout(card, arg, qty + x);
2323 if (timeout > host->max_busy_timeout)
2324 break;
2325 if (timeout < last_timeout)
2326 break;
2327 last_timeout = timeout;
2328 y = x;
2329 }
2330 qty += y;
2331 } while (y);
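	/*
	 * Illustrative example of the search above: if mmc_erase_timeout()
	 * works out to roughly 300 ms per erase group and
	 * host->max_busy_timeout is 1000 ms, the first pass accepts x = 1
	 * and x = 2 (300 ms, 600 ms) and rejects x = 4 (1200 ms); a second
	 * pass adds one more group (900 ms), ending with qty = 3.
	 */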
2332
2333 if (!qty)
2334 return 0;
2335
2336 /*
2337 * When specifying a sector range to trim, chances are we might cross
2338	 * an erase-group boundary even if the number of sectors is less than
2339 * one erase-group.
2340 * If we can only fit one erase-group in the controller timeout budget,
2341 * we have to care that erase-group boundaries are not crossed by a
2342 * single trim operation. We flag that special case with "eg_boundary".
2343 * In all other cases we can just decrement qty and pretend that we
2344 * always touch (qty + 1) erase-groups as a simple optimization.
2345 */
2346 if (qty == 1)
2347 card->eg_boundary = 1;
2348 else
2349 qty--;
2350
2351 /* Convert qty to sectors */
2352 if (card->erase_shift)
2353 max_discard = qty << card->erase_shift;
2354 else if (mmc_card_sd(card))
2355 max_discard = qty + 1;
2356 else
2357 max_discard = qty * card->erase_size;
2358
2359 return max_discard;
2360}
2361
2362unsigned int mmc_calc_max_discard(struct mmc_card *card)
2363{
2364 struct mmc_host *host = card->host;
2365 unsigned int max_discard, max_trim;
2366
2367 if (!host->max_busy_timeout)
2368 return UINT_MAX;
2369
2370 /*
2371 * Without erase_group_def set, MMC erase timeout depends on clock
2372	 * frequency, which can change. In that case, the best choice is
2373 * just the preferred erase size.
2374 */
2375 if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
2376 return card->pref_erase;
2377
2378 max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
2379 if (mmc_can_trim(card)) {
2380 max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
2381 if (max_trim < max_discard)
2382 max_discard = max_trim;
2383 } else if (max_discard < card->erase_size) {
2384 max_discard = 0;
2385 }
2386 pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
2387 mmc_hostname(host), max_discard, host->max_busy_timeout);
2388 return max_discard;
2389}
2390EXPORT_SYMBOL(mmc_calc_max_discard);
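/*
 * Usage sketch (illustrative only; blk_queue_max_discard_sectors() is the
 * assumed block layer helper): a block driver would typically feed the
 * result into its request queue limits, e.g.
 *
 *	unsigned int max_discard = mmc_calc_max_discard(card);
 *
 *	if (max_discard)
 *		blk_queue_max_discard_sectors(q, max_discard);
 */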
2391
2392int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
2393{
2394 struct mmc_command cmd = {0};
2395
2396 if (mmc_card_blockaddr(card) || mmc_card_ddr52(card))
2397 return 0;
2398
2399 cmd.opcode = MMC_SET_BLOCKLEN;
2400 cmd.arg = blocklen;
2401 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2402 return mmc_wait_for_cmd(card->host, &cmd, 5);
2403}
2404EXPORT_SYMBOL(mmc_set_blocklen);
2405
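/*
 * mmc_set_blockcount() issues CMD23 (SET_BLOCK_COUNT) ahead of a multi-block
 * transfer; bit 31 of the argument requests a reliable write on eMMC.
 */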
2406int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
2407 bool is_rel_write)
2408{
2409 struct mmc_command cmd = {0};
2410
2411 cmd.opcode = MMC_SET_BLOCK_COUNT;
2412 cmd.arg = blockcount & 0x0000FFFF;
2413 if (is_rel_write)
2414 cmd.arg |= 1 << 31;
2415 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2416 return mmc_wait_for_cmd(card->host, &cmd, 5);
2417}
2418EXPORT_SYMBOL(mmc_set_blockcount);
2419
2420static void mmc_hw_reset_for_init(struct mmc_host *host)
2421{
2422 if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
2423 return;
2424 mmc_host_clk_hold(host);
2425 host->ops->hw_reset(host);
2426 mmc_host_clk_release(host);
2427}
2428
2429int mmc_hw_reset(struct mmc_host *host)
2430{
2431 int ret;
2432
2433 if (!host->card)
2434 return -EINVAL;
2435
2436 mmc_bus_get(host);
2437 if (!host->bus_ops || host->bus_dead || !host->bus_ops->reset) {
2438 mmc_bus_put(host);
2439 return -EOPNOTSUPP;
2440 }
2441
2442 ret = host->bus_ops->reset(host);
2443 mmc_bus_put(host);
2444
2445 if (ret != -EOPNOTSUPP)
2446 pr_warn("%s: tried to reset card\n", mmc_hostname(host));
2447
2448 return ret;
2449}
2450EXPORT_SYMBOL(mmc_hw_reset);
2451
2452static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
2453{
2454 host->f_init = freq;
2455
2456#ifdef CONFIG_MMC_DEBUG
2457 pr_info("%s: %s: trying to init card at %u Hz\n",
2458 mmc_hostname(host), __func__, host->f_init);
2459#endif
2460 mmc_power_up(host, host->ocr_avail);
2461
2462 /*
2463 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
2464 * do a hardware reset if possible.
2465 */
2466 mmc_hw_reset_for_init(host);
2467
2468 /*
2469 * sdio_reset sends CMD52 to reset card. Since we do not know
2470 * if the card is being re-initialized, just send it. CMD52
2471 * should be ignored by SD/eMMC cards.
2472 */
2473 sdio_reset(host);
2474 mmc_go_idle(host);
2475
2476 mmc_send_if_cond(host, host->ocr_avail);
2477
2478 /* Order's important: probe SDIO, then SD, then MMC */
2479 if (!mmc_attach_sdio(host))
2480 return 0;
2481 if (!mmc_attach_sd(host))
2482 return 0;
2483 if (!mmc_attach_mmc(host))
2484 return 0;
2485
2486 mmc_power_off(host);
2487 return -EIO;
2488}
2489
2490int _mmc_detect_card_removed(struct mmc_host *host)
2491{
2492 int ret;
2493
2494 if (host->caps & MMC_CAP_NONREMOVABLE)
2495 return 0;
2496
2497 if (!host->card || mmc_card_removed(host->card))
2498 return 1;
2499
2500 ret = host->bus_ops->alive(host);
2501
2502 /*
2503	 * Card detect status and the alive check may be out of sync if the card
2504	 * is removed slowly: the card detect switch changes while the card/slot
2505	 * pads are still in contact (refer to "SD Card Mechanical
2506 * Addendum, Appendix C: Card Detection Switch"). So reschedule a
2507 * detect work 200ms later for this case.
2508 */
2509 if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
2510 mmc_detect_change(host, msecs_to_jiffies(200));
2511 pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
2512 }
2513
2514 if (ret) {
2515 mmc_card_set_removed(host->card);
2516 pr_debug("%s: card remove detected\n", mmc_hostname(host));
2517 }
2518
2519 return ret;
2520}
2521
2522int mmc_detect_card_removed(struct mmc_host *host)
2523{
2524 struct mmc_card *card = host->card;
2525 int ret;
2526
2527 WARN_ON(!host->claimed);
2528
2529 if (!card)
2530 return 1;
2531
2532 ret = mmc_card_removed(card);
2533 /*
2534 * The card will be considered unchanged unless we have been asked to
2535 * detect a change or host requires polling to provide card detection.
2536 */
2537 if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
2538 return ret;
2539
2540 host->detect_change = 0;
2541 if (!ret) {
2542 ret = _mmc_detect_card_removed(host);
2543 if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
2544 /*
2545 * Schedule a detect work as soon as possible to let a
2546 * rescan handle the card removal.
2547 */
2548 cancel_delayed_work(&host->detect);
2549 _mmc_detect_change(host, 0, false);
2550 }
2551 }
2552
2553 return ret;
2554}
2555EXPORT_SYMBOL(mmc_detect_card_removed);
2556
2557void mmc_rescan(struct work_struct *work)
2558{
2559 struct mmc_host *host =
2560 container_of(work, struct mmc_host, detect.work);
2561 int i;
2562
2563 if (host->trigger_card_event && host->ops->card_event) {
2564 host->ops->card_event(host);
2565 host->trigger_card_event = false;
2566 }
2567
2568 if (host->rescan_disable)
2569 return;
2570
2571 /* If there is a non-removable card registered, only scan once */
2572 if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered)
2573 return;
2574 host->rescan_entered = 1;
2575
2576 mmc_bus_get(host);
2577
2578 /*
2579 * if there is a _removable_ card registered, check whether it is
2580 * still present
2581 */
2582 if (host->bus_ops && !host->bus_dead
2583 && !(host->caps & MMC_CAP_NONREMOVABLE))
2584 host->bus_ops->detect(host);
2585
2586 host->detect_change = 0;
2587
2588 /*
2589 * Let mmc_bus_put() free the bus/bus_ops if we've found that
2590 * the card is no longer present.
2591 */
2592 mmc_bus_put(host);
2593 mmc_bus_get(host);
2594
2595 /* if there still is a card present, stop here */
2596 if (host->bus_ops != NULL) {
2597 mmc_bus_put(host);
2598 goto out;
2599 }
2600
2601 /*
2602 * Only we can add a new handler, so it's safe to
2603 * release the lock here.
2604 */
2605 mmc_bus_put(host);
2606
2607 if (!(host->caps & MMC_CAP_NONREMOVABLE) && host->ops->get_cd &&
2608 host->ops->get_cd(host) == 0) {
2609 mmc_claim_host(host);
2610 mmc_power_off(host);
2611 mmc_release_host(host);
2612 goto out;
2613 }
2614
2615 mmc_claim_host(host);
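	/*
	 * Walk the initialization frequency table from fastest to slowest,
	 * never dropping below the host minimum: each entry is clamped up
	 * to host->f_min, and once an entry at or below f_min has been
	 * tried there is no point in continuing.
	 */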
2616 for (i = 0; i < ARRAY_SIZE(freqs); i++) {
2617 if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
2618 break;
2619 if (freqs[i] <= host->f_min)
2620 break;
2621 }
2622 mmc_release_host(host);
2623
2624 out:
2625 if (host->caps & MMC_CAP_NEEDS_POLL)
2626 mmc_schedule_delayed_work(&host->detect, HZ);
2627}
2628
2629void mmc_start_host(struct mmc_host *host)
2630{
2631 host->f_init = max(freqs[0], host->f_min);
2632 host->rescan_disable = 0;
2633 host->ios.power_mode = MMC_POWER_UNDEFINED;
2634 if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)
2635 mmc_power_off(host);
2636 else
2637 mmc_power_up(host, host->ocr_avail);
2638 mmc_gpiod_request_cd_irq(host);
2639 _mmc_detect_change(host, 0, false);
2640}
2641
2642void mmc_stop_host(struct mmc_host *host)
2643{
2644#ifdef CONFIG_MMC_DEBUG
2645 unsigned long flags;
2646 spin_lock_irqsave(&host->lock, flags);
2647 host->removed = 1;
2648 spin_unlock_irqrestore(&host->lock, flags);
2649#endif
2650 if (host->slot.cd_irq >= 0)
2651 disable_irq(host->slot.cd_irq);
2652
2653 host->rescan_disable = 1;
2654 cancel_delayed_work_sync(&host->detect);
2655 mmc_flush_scheduled_work();
2656
2657 /* clear pm flags now and let card drivers set them as needed */
2658 host->pm_flags = 0;
2659
2660 mmc_bus_get(host);
2661 if (host->bus_ops && !host->bus_dead) {
2662 /* Calling bus_ops->remove() with a claimed host can deadlock */
2663 host->bus_ops->remove(host);
2664 mmc_claim_host(host);
2665 mmc_detach_bus(host);
2666 mmc_power_off(host);
2667 mmc_release_host(host);
2668 mmc_bus_put(host);
2669 return;
2670 }
2671 mmc_bus_put(host);
2672
2673 BUG_ON(host->card);
2674
2675 mmc_power_off(host);
2676}
2677
2678int mmc_power_save_host(struct mmc_host *host)
2679{
2680 int ret = 0;
2681
2682#ifdef CONFIG_MMC_DEBUG
2683 pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
2684#endif
2685
2686 mmc_bus_get(host);
2687
2688 if (!host->bus_ops || host->bus_dead) {
2689 mmc_bus_put(host);
2690 return -EINVAL;
2691 }
2692
2693 if (host->bus_ops->power_save)
2694 ret = host->bus_ops->power_save(host);
2695
2696 mmc_bus_put(host);
2697
2698 mmc_power_off(host);
2699
2700 return ret;
2701}
2702EXPORT_SYMBOL(mmc_power_save_host);
2703
2704int mmc_power_restore_host(struct mmc_host *host)
2705{
2706 int ret;
2707
2708#ifdef CONFIG_MMC_DEBUG
2709 pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
2710#endif
2711
2712 mmc_bus_get(host);
2713
2714 if (!host->bus_ops || host->bus_dead) {
2715 mmc_bus_put(host);
2716 return -EINVAL;
2717 }
2718
2719 mmc_power_up(host, host->card->ocr);
2720 ret = host->bus_ops->power_restore(host);
2721
2722 mmc_bus_put(host);
2723
2724 return ret;
2725}
2726EXPORT_SYMBOL(mmc_power_restore_host);
2727
2728/*
2729 * Flush the cache to the non-volatile storage.
2730 */
2731int mmc_flush_cache(struct mmc_card *card)
2732{
2733 int err = 0;
2734
2735 if (mmc_card_mmc(card) &&
2736 (card->ext_csd.cache_size > 0) &&
2737 (card->ext_csd.cache_ctrl & 1)) {
2738 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2739 EXT_CSD_FLUSH_CACHE, 1, 0);
2740 if (err)
2741 pr_err("%s: cache flush error %d\n",
2742 mmc_hostname(card->host), err);
2743 }
2744
2745 return err;
2746}
2747EXPORT_SYMBOL(mmc_flush_cache);
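/*
 * Usage note (call sites live outside this file): suspend and shutdown
 * paths are expected to call mmc_flush_cache(card) before the card can
 * lose power, so that cached data reaches non-volatile storage.
 */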
2748
2749#ifdef CONFIG_PM
2750
2751/* Do the card removal on suspend if the card is assumed removable.
2752 * Do that in the pm notifier while userspace isn't yet frozen, so we will
2753 * still be able to sync the card.
2754 */
2755int mmc_pm_notify(struct notifier_block *notify_block,
2756 unsigned long mode, void *unused)
2757{
2758 struct mmc_host *host = container_of(
2759 notify_block, struct mmc_host, pm_notify);
2760 unsigned long flags;
2761 int err = 0;
2762
2763 switch (mode) {
2764 case PM_HIBERNATION_PREPARE:
2765 case PM_SUSPEND_PREPARE:
2766 case PM_RESTORE_PREPARE:
2767 spin_lock_irqsave(&host->lock, flags);
2768 host->rescan_disable = 1;
2769 spin_unlock_irqrestore(&host->lock, flags);
2770 cancel_delayed_work_sync(&host->detect);
2771
2772 if (!host->bus_ops)
2773 break;
2774
2775 /* Validate prerequisites for suspend */
2776 if (host->bus_ops->pre_suspend)
2777 err = host->bus_ops->pre_suspend(host);
2778 if (!err)
2779 break;
2780
2781 /* Calling bus_ops->remove() with a claimed host can deadlock */
2782 host->bus_ops->remove(host);
2783 mmc_claim_host(host);
2784 mmc_detach_bus(host);
2785 mmc_power_off(host);
2786 mmc_release_host(host);
2787 host->pm_flags = 0;
2788 break;
2789
2790 case PM_POST_SUSPEND:
2791 case PM_POST_HIBERNATION:
2792 case PM_POST_RESTORE:
2793
2794 spin_lock_irqsave(&host->lock, flags);
2795 host->rescan_disable = 0;
2796 spin_unlock_irqrestore(&host->lock, flags);
2797 _mmc_detect_change(host, 0, false);
2798
2799 }
2800
2801 return 0;
2802}
2803#endif
2804
2805/**
2806 * mmc_init_context_info() - init synchronization context
2807 * @host: mmc host
2808 *
2809 * Initialise the struct context_info needed to implement the asynchronous
2810 * request mechanism used by the mmc core, host drivers and mmc request
2811 * suppliers.
2812 */
2813void mmc_init_context_info(struct mmc_host *host)
2814{
2815 spin_lock_init(&host->context_info.lock);
2816 host->context_info.is_new_req = false;
2817 host->context_info.is_done_rcv = false;
2818 host->context_info.is_waiting_last_req = false;
2819 init_waitqueue_head(&host->context_info.wait);
2820}
2821
2822static int __init mmc_init(void)
2823{
2824 int ret;
2825
2826 workqueue = alloc_ordered_workqueue("kmmcd", 0);
2827 if (!workqueue)
2828 return -ENOMEM;
2829
2830 ret = mmc_register_bus();
2831 if (ret)
2832 goto destroy_workqueue;
2833
2834 ret = mmc_register_host_class();
2835 if (ret)
2836 goto unregister_bus;
2837
2838 ret = sdio_register_bus();
2839 if (ret)
2840 goto unregister_host_class;
2841
2842 return 0;
2843
2844unregister_host_class:
2845 mmc_unregister_host_class();
2846unregister_bus:
2847 mmc_unregister_bus();
2848destroy_workqueue:
2849 destroy_workqueue(workqueue);
2850
2851 return ret;
2852}
2853
2854static void __exit mmc_exit(void)
2855{
2856 sdio_unregister_bus();
2857 mmc_unregister_host_class();
2858 mmc_unregister_bus();
2859 destroy_workqueue(workqueue);
2860}
2861
2862subsys_initcall(mmc_init);
2863module_exit(mmc_exit);
2864
2865MODULE_LICENSE("GPL");