/*
 * linux/drivers/mmc/core/core.c
 *
 * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
#include <linux/suspend.h>
#include <linux/fault-inject.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/of.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/slot-gpio.h>

#define CREATE_TRACE_POINTS
#include <trace/events/mmc.h>

#include "core.h"
#include "card.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"
#include "pwrseq.h"

#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"

/* If the device is not responding */
#define MMC_CORE_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */

/*
 * Background operations can take a long time, depending on the housekeeping
 * operations the card has to perform.
 */
#define MMC_BKOPS_MAX_TIMEOUT	(4 * 60 * 1000) /* max time to wait in ms */

/* The max erase timeout, used when host->max_busy_timeout isn't specified */
#define MMC_ERASE_TIMEOUT_MS	(60 * 1000) /* 60 s */

static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
bool use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);

static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	/*
	 * We use the system_freezable_wq for two reasons. First, it allows
	 * several work items (though not the same work item) to be executed
	 * simultaneously. Second, the queue becomes frozen when userspace
	 * becomes frozen during system PM.
	 */
	return queue_delayed_work(system_freezable_wq, work, delay);
}

#ifdef CONFIG_FAIL_MMC_REQUEST

/*
 * Internal function. Inject random data errors.
 * If mmc_data is NULL no errors are injected.
 */
static void mmc_should_fail_request(struct mmc_host *host,
				    struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	struct mmc_data *data = mrq->data;
	static const int data_errors[] = {
		-ETIMEDOUT,
		-EILSEQ,
		-EIO,
	};

	if (!data)
		return;

	if (cmd->error || data->error ||
	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
		return;

	data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
	data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
}

#else /* CONFIG_FAIL_MMC_REQUEST */

static inline void mmc_should_fail_request(struct mmc_host *host,
					   struct mmc_request *mrq)
{
}

#endif /* CONFIG_FAIL_MMC_REQUEST */

static inline void mmc_complete_cmd(struct mmc_request *mrq)
{
	if (mrq->cap_cmd_during_tfr && !completion_done(&mrq->cmd_completion))
		complete_all(&mrq->cmd_completion);
}

void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq)
{
	if (!mrq->cap_cmd_during_tfr)
		return;

	mmc_complete_cmd(mrq);

	pr_debug("%s: cmd done, tfr ongoing (CMD%u)\n",
		 mmc_hostname(host), mrq->cmd->opcode);
}
EXPORT_SYMBOL(mmc_command_done);

/**
 * mmc_request_done - finish processing an MMC request
 * @host: MMC host which completed request
 * @mrq: MMC request which completed
 *
 * MMC drivers should call this function when they have completed
 * their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;

	/* Flag re-tuning needed on CRC errors */
	if ((cmd->opcode != MMC_SEND_TUNING_BLOCK &&
	    cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
	    (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
	    (mrq->data && mrq->data->error == -EILSEQ) ||
	    (mrq->stop && mrq->stop->error == -EILSEQ)))
		mmc_retune_needed(host);

	if (err && cmd->retries && mmc_host_is_spi(host)) {
		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
			cmd->retries = 0;
	}

	if (host->ongoing_mrq == mrq)
		host->ongoing_mrq = NULL;

	mmc_complete_cmd(mrq);

	trace_mmc_request_done(host, mrq);

	/*
	 * We list various conditions for the command to be considered
	 * properly done:
	 *
	 * - There was no error, OK fine then
	 * - We are not doing some kind of retry
	 * - The card was removed (...so just complete everything no matter
	 *   if there are errors or retries)
	 */
	if (!err || !cmd->retries || mmc_card_removed(host->card)) {
		mmc_should_fail_request(host, mrq);

		if (!host->ongoing_mrq)
			led_trigger_event(host->led, LED_OFF);

		if (mrq->sbc) {
			pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
				 mmc_hostname(host), mrq->sbc->opcode,
				 mrq->sbc->error,
				 mrq->sbc->resp[0], mrq->sbc->resp[1],
				 mrq->sbc->resp[2], mrq->sbc->resp[3]);
		}

		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			 mmc_hostname(host), cmd->opcode, err,
			 cmd->resp[0], cmd->resp[1],
			 cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
			pr_debug("%s: %d bytes transferred: %d\n",
				 mmc_hostname(host),
				 mrq->data->bytes_xfered, mrq->data->error);
		}

		if (mrq->stop) {
			pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
				 mmc_hostname(host), mrq->stop->opcode,
				 mrq->stop->error,
				 mrq->stop->resp[0], mrq->stop->resp[1],
				 mrq->stop->resp[2], mrq->stop->resp[3]);
		}
	}
	/*
	 * Request starter must handle retries - see
	 * mmc_wait_for_req_done().
	 */
	if (mrq->done)
		mrq->done(mrq);
}

EXPORT_SYMBOL(mmc_request_done);
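
/*
 * Example (illustrative sketch, not part of the core): a host controller
 * driver typically calls mmc_request_done() from its interrupt handler once
 * the hardware signals completion. The my_host structure, my_readl()
 * accessor and register bits below are hypothetical.
 *
 *	static irqreturn_t my_host_irq(int irq, void *dev_id)
 *	{
 *		struct my_host *h = dev_id;
 *		u32 stat = my_readl(h, MY_INT_STATUS);
 *
 *		if (stat & MY_INT_CRC_ERR)
 *			h->mrq->cmd->error = -EILSEQ;	// flags re-tuning
 *		else if (stat & MY_INT_TIMEOUT)
 *			h->mrq->cmd->error = -ETIMEDOUT;
 *
 *		// Hand the finished request back to the core
 *		mmc_request_done(h->mmc, h->mrq);
 *		return IRQ_HANDLED;
 *	}
 */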

static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	/* Assumes host controller has been runtime resumed by mmc_claim_host */
	err = mmc_retune(host);
	if (err) {
		mrq->cmd->error = err;
		mmc_request_done(host, mrq);
		return;
	}

	/*
	 * For sdio rw commands we must wait for card busy otherwise some
	 * sdio devices won't work properly.
	 * And bypass I/O abort, reset and bus suspend operations.
	 */
	if (sdio_is_io_busy(mrq->cmd->opcode, mrq->cmd->arg) &&
	    host->ops->card_busy) {
		int tries = 500; /* Wait approx 500 ms at maximum */

		while (host->ops->card_busy(host) && --tries)
			mmc_delay(1);

		if (tries == 0) {
			mrq->cmd->error = -EBUSY;
			mmc_request_done(host, mrq);
			return;
		}
	}

	if (mrq->cap_cmd_during_tfr) {
		host->ongoing_mrq = mrq;
		/*
		 * The retry path could come through here without having
		 * waited on cmd_completion, so ensure it is reinitialised.
		 */
		reinit_completion(&mrq->cmd_completion);
	}

	trace_mmc_request_start(host, mrq);

	host->ops->request(host, mrq);
}

static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq)
{
	if (mrq->sbc) {
		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
			 mmc_hostname(host), mrq->sbc->opcode,
			 mrq->sbc->arg, mrq->sbc->flags);
	}

	if (mrq->cmd) {
		pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->arg,
			 mrq->cmd->flags);
	}

	if (mrq->data) {
		pr_debug("%s: blksz %d blocks %d flags %08x "
			 "tsac %d ms nsac %d\n",
			 mmc_hostname(host), mrq->data->blksz,
			 mrq->data->blocks, mrq->data->flags,
			 mrq->data->timeout_ns / 1000000,
			 mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s: CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}
}

static int mmc_mrq_prep(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned int i, sz;
	struct scatterlist *sg;
#endif

	if (mrq->cmd) {
		mrq->cmd->error = 0;
		mrq->cmd->mrq = mrq;
		mrq->cmd->data = mrq->data;
	}
	if (mrq->sbc) {
		mrq->sbc->error = 0;
		mrq->sbc->mrq = mrq;
	}
	if (mrq->data) {
		if (mrq->data->blksz > host->max_blk_size ||
		    mrq->data->blocks > host->max_blk_count ||
		    mrq->data->blocks * mrq->data->blksz > host->max_req_size)
			return -EINVAL;
#ifdef CONFIG_MMC_DEBUG
		sz = 0;
		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
			sz += sg->length;
		if (sz != mrq->data->blocks * mrq->data->blksz)
			return -EINVAL;
#endif
		mrq->data->error = 0;
		mrq->data->mrq = mrq;
		if (mrq->stop) {
			mrq->data->stop = mrq->stop;
			mrq->stop->error = 0;
			mrq->stop->mrq = mrq;
		}
	}

	return 0;
}

static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	mmc_retune_hold(host);

	if (mmc_card_removed(host->card))
		return -ENOMEDIUM;

	mmc_mrq_pr_debug(host, mrq);

	WARN_ON(!host->claimed);

	err = mmc_mrq_prep(host, mrq);
	if (err)
		return err;

	led_trigger_event(host->led, LED_FULL);
	__mmc_start_request(host, mrq);

	return 0;
}

/**
 * mmc_start_bkops - start BKOPS for supported cards
 * @card: MMC card to start BKOPS
 * @from_exception: A flag to indicate if this function was
 *		    called due to an exception raised by the card
 *
 * Start background operations whenever requested.
 * When the urgent BKOPS bit is set in a R1 command response
 * then background operations should be started immediately.
 */
void mmc_start_bkops(struct mmc_card *card, bool from_exception)
{
	int err;
	int timeout;
	bool use_busy_signal;

	if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status)
		return;

	if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
	    from_exception)
		return;

	mmc_claim_host(card->host);
	if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
		timeout = MMC_BKOPS_MAX_TIMEOUT;
		use_busy_signal = true;
	} else {
		timeout = 0;
		use_busy_signal = false;
	}

	mmc_retune_hold(card->host);

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_BKOPS_START, 1, timeout, 0,
			   use_busy_signal, true, false);
	if (err) {
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);
		mmc_retune_release(card->host);
		goto out;
	}
	/*
	 * For urgent BKOPS status (LEVEL_2 and higher), BKOPS is executed
	 * synchronously; otherwise the operation is left in progress and
	 * the card is flagged as doing BKOPS.
	 */
	if (!use_busy_signal)
		mmc_card_set_doing_bkops(card);
	else
		mmc_retune_release(card->host);
out:
	mmc_release_host(card->host);
}
EXPORT_SYMBOL(mmc_start_bkops);

/*
 * mmc_wait_data_done() - done callback for data request
 * @mrq: done data request
 *
 * Wakes up mmc context, passed as a callback to host controller driver
 */
static void mmc_wait_data_done(struct mmc_request *mrq)
{
	struct mmc_context_info *context_info = &mrq->host->context_info;

	context_info->is_done_rcv = true;
	wake_up_interruptible(&context_info->wait);
}

static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(&mrq->completion);
}

static inline void mmc_wait_ongoing_tfr_cmd(struct mmc_host *host)
{
	struct mmc_request *ongoing_mrq = READ_ONCE(host->ongoing_mrq);

	/*
	 * If there is an ongoing transfer, wait for the command line to become
	 * available.
	 */
	if (ongoing_mrq && !completion_done(&ongoing_mrq->cmd_completion))
		wait_for_completion(&ongoing_mrq->cmd_completion);
}

464/*
465 *__mmc_start_data_req() - starts data request
466 * @host: MMC host to start the request
467 * @mrq: data request to start
468 *
469 * Sets the done callback to be called when request is completed by the card.
470 * Starts data mmc request execution
471 * If an ongoing transfer is already in progress, wait for the command line
472 * to become available before sending another command.
473 */
static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	mmc_wait_ongoing_tfr_cmd(host);

	mrq->done = mmc_wait_data_done;
	mrq->host = host;

	init_completion(&mrq->cmd_completion);

	err = mmc_start_request(host, mrq);
	if (err) {
		mrq->cmd->error = err;
		mmc_complete_cmd(mrq);
		mmc_wait_data_done(mrq);
	}

	return err;
}

static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	mmc_wait_ongoing_tfr_cmd(host);

	init_completion(&mrq->completion);
	mrq->done = mmc_wait_done;

	init_completion(&mrq->cmd_completion);

	err = mmc_start_request(host, mrq);
	if (err) {
		mrq->cmd->error = err;
		mmc_complete_cmd(mrq);
		complete(&mrq->completion);
	}

	return err;
}

void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd;

	while (1) {
		wait_for_completion(&mrq->completion);

		cmd = mrq->cmd;

		/*
		 * If host has timed out waiting for the sanitize
		 * to complete, card might be still in programming state
		 * so let's try to bring the card out of programming
		 * state.
		 */
		if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
			if (!mmc_interrupt_hpi(host->card)) {
				pr_warn("%s: %s: Interrupted sanitize\n",
					mmc_hostname(host), __func__);
				cmd->error = 0;
				break;
			} else {
				pr_err("%s: %s: Failed to interrupt sanitize\n",
				       mmc_hostname(host), __func__);
			}
		}
		if (!cmd->error || !cmd->retries ||
		    mmc_card_removed(host->card))
			break;

		mmc_retune_recheck(host);

		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			 mmc_hostname(host), cmd->opcode, cmd->error);
		cmd->retries--;
		cmd->error = 0;
		__mmc_start_request(host, mrq);
	}

	mmc_retune_release(host);
}
EXPORT_SYMBOL(mmc_wait_for_req_done);

/**
 * mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done
 * @host: MMC host
 * @mrq: MMC request
 *
 * mmc_is_req_done() is used with requests that have
 * mrq->cap_cmd_during_tfr = true. mmc_is_req_done() must be called after
 * starting a request and before waiting for it to complete. That is,
 * either in between calls to mmc_start_req(), or after mmc_wait_for_req()
 * and before mmc_wait_for_req_done(). If it is called at other times the
 * result is not meaningful.
 */
bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq)
{
	if (host->areq)
		return host->context_info.is_done_rcv;
	else
		return completion_done(&mrq->completion);
}
EXPORT_SYMBOL(mmc_is_req_done);

/**
 * mmc_pre_req - Prepare for a new request
 * @host: MMC host to prepare command
 * @mrq: MMC request to prepare for
 *
 * mmc_pre_req() is called prior to mmc_start_req() to let the
 * host prepare for the new request. Preparation of a request may be
 * performed while another request is running on the host.
 */
static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq)
{
	if (host->ops->pre_req)
		host->ops->pre_req(host, mrq);
}

/**
 * mmc_post_req - Post process a completed request
 * @host: MMC host to post process command
 * @mrq: MMC request to post process for
 * @err: error; if non-zero, clean up any resources made in pre_req
 *
 * Let the host post process a completed request. Post processing of
 * a request may be performed while another request is running.
 */
static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
			 int err)
{
	if (host->ops->post_req)
		host->ops->post_req(host, mrq, err);
}

/**
 * mmc_finalize_areq() - finalize an asynchronous request
 * @host: MMC host to finalize any ongoing request on
 *
 * Returns the status of the ongoing asynchronous request, or
 * MMC_BLK_SUCCESS if no request was ongoing.
 */
static enum mmc_blk_status mmc_finalize_areq(struct mmc_host *host)
{
	struct mmc_context_info *context_info = &host->context_info;
	enum mmc_blk_status status;

	if (!host->areq)
		return MMC_BLK_SUCCESS;

	while (1) {
		wait_event_interruptible(context_info->wait,
				(context_info->is_done_rcv ||
				 context_info->is_new_req));

		if (context_info->is_done_rcv) {
			struct mmc_command *cmd;

			context_info->is_done_rcv = false;
			cmd = host->areq->mrq->cmd;

			if (!cmd->error || !cmd->retries ||
			    mmc_card_removed(host->card)) {
				status = host->areq->err_check(host->card,
							       host->areq);
				break; /* return status */
			} else {
				mmc_retune_recheck(host);
				pr_info("%s: req failed (CMD%u): %d, retrying...\n",
					mmc_hostname(host),
					cmd->opcode, cmd->error);
				cmd->retries--;
				cmd->error = 0;
				__mmc_start_request(host, host->areq->mrq);
				continue; /* wait for done/new event again */
			}
		}

		return MMC_BLK_NEW_REQUEST;
	}

	mmc_retune_release(host);

	/*
	 * Check BKOPS urgency for each R1 response
	 */
	if (host->card && mmc_card_mmc(host->card) &&
	    ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
	     (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
	    (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) {
		mmc_start_bkops(host->card, true);
	}

	return status;
}

/**
 * mmc_start_areq - start an asynchronous request
 * @host: MMC host to start command
 * @areq: asynchronous request to start
 * @ret_stat: out parameter for status
 *
 * Start a new MMC custom command request for a host.
 * If there is an ongoing async request, wait for its completion,
 * then start the new one and return.
 * Does not wait for the new request to complete.
 *
 * Returns the completed request, or NULL if none completed.
 * Waits for an ongoing request (previously started) to complete and
 * returns the completed request. If there is no ongoing request, NULL
 * is returned without waiting. NULL is not an error condition.
 */
struct mmc_async_req *mmc_start_areq(struct mmc_host *host,
				     struct mmc_async_req *areq,
				     enum mmc_blk_status *ret_stat)
{
	enum mmc_blk_status status;
	int start_err = 0;
	struct mmc_async_req *previous = host->areq;

	/* Prepare a new request */
	if (areq)
		mmc_pre_req(host, areq->mrq);

	/* Finalize previous request */
	status = mmc_finalize_areq(host);
	if (ret_stat)
		*ret_stat = status;

	/* The previous request is still going on... */
	if (status == MMC_BLK_NEW_REQUEST)
		return NULL;

	/* Fine so far, start the new request! */
	if (status == MMC_BLK_SUCCESS && areq)
		start_err = __mmc_start_data_req(host, areq->mrq);

	/* Postprocess the old request at this point */
	if (host->areq)
		mmc_post_req(host, host->areq->mrq, 0);

	/* Cancel a prepared request if it was not started. */
	if ((status != MMC_BLK_SUCCESS || start_err) && areq)
		mmc_post_req(host, areq->mrq, -EINVAL);

	if (status != MMC_BLK_SUCCESS)
		host->areq = NULL;
	else
		host->areq = areq;

	return previous;
}
EXPORT_SYMBOL(mmc_start_areq);

/**
 * mmc_wait_for_req - start a request and wait for completion
 * @host: MMC host to start command
 * @mrq: MMC request to start
 *
 * Start a new MMC custom command request for a host, and wait
 * for the command to complete. In the case of 'cap_cmd_during_tfr'
 * requests, the transfer is ongoing and the caller can issue further
 * commands that do not use the data lines, and then wait by calling
 * mmc_wait_for_req_done().
 * Does not attempt to parse the response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
	__mmc_start_req(host, mrq);

	if (!mrq->cap_cmd_during_tfr)
		mmc_wait_for_req_done(host, mrq);
}
EXPORT_SYMBOL(mmc_wait_for_req);
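
/*
 * Example (hedged sketch): issuing a synchronous single-block read with
 * mmc_wait_for_req(). Buffer handling is simplified and a block-addressed
 * card is assumed; the real caller of this machinery is the block driver.
 * example_read_block() is hypothetical.
 *
 *	static int example_read_block(struct mmc_card *card, u32 blk_addr,
 *				      void *buf)
 *	{
 *		struct mmc_request mrq = {};
 *		struct mmc_command cmd = {};
 *		struct mmc_data data = {};
 *		struct scatterlist sg;
 *
 *		cmd.opcode = MMC_READ_SINGLE_BLOCK;
 *		cmd.arg = blk_addr;
 *		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
 *
 *		data.blksz = 512;
 *		data.blocks = 1;
 *		data.flags = MMC_DATA_READ;
 *		data.sg = &sg;
 *		data.sg_len = 1;
 *		sg_init_one(&sg, buf, 512);
 *
 *		mrq.cmd = &cmd;
 *		mrq.data = &data;
 *
 *		mmc_set_data_timeout(&data, card);
 *
 *		mmc_claim_host(card->host);
 *		mmc_wait_for_req(card->host, &mrq);
 *		mmc_release_host(card->host);
 *
 *		return cmd.error ? cmd.error : data.error;
 *	}
 */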

/**
 * mmc_interrupt_hpi - issue a High Priority Interrupt
 * @card: the MMC card associated with the HPI transfer
 *
 * Issues a High Priority Interrupt, then polls the card status
 * until the card is out of the programming state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;
	unsigned long prg_wait;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	mmc_claim_host(card->host);
	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			 mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card, &status);
	if (err)
		goto out;

	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
	do {
		err = mmc_send_status(card, &status);

		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
			break;
		if (time_after(jiffies, prg_wait))
			err = -ETIMEDOUT;
	} while (!err);

out:
	mmc_release_host(card->host);
	return err;
}
EXPORT_SYMBOL(mmc_interrupt_hpi);

/**
 * mmc_wait_for_cmd - start a command and wait for completion
 * @host: MMC host to start command
 * @cmd: MMC command to start
 * @retries: maximum number of retries
 *
 * Start a new MMC command for a host, and wait for the command
 * to complete. Return any error that occurred while the command
 * was executing. Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
	struct mmc_request mrq = {};

	WARN_ON(!host->claimed);

	memset(cmd->resp, 0, sizeof(cmd->resp));
	cmd->retries = retries;

	mrq.cmd = cmd;
	cmd->data = NULL;

	mmc_wait_for_req(host, &mrq);

	return cmd->error;
}

EXPORT_SYMBOL(mmc_wait_for_cmd);
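
/*
 * Example (hedged sketch): querying the card status register with
 * mmc_wait_for_cmd(). This mirrors what mmc_send_status() in mmc_ops.c
 * does; the host must already be claimed by the caller, and
 * example_get_status() is a hypothetical helper.
 *
 *	static int example_get_status(struct mmc_card *card, u32 *status)
 *	{
 *		struct mmc_command cmd = {};
 *		int err;
 *
 *		cmd.opcode = MMC_SEND_STATUS;
 *		cmd.arg = card->rca << 16;
 *		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 *
 *		err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
 *		if (!err)
 *			*status = cmd.resp[0];
 *		return err;
 *	}
 */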

/**
 * mmc_stop_bkops - stop ongoing BKOPS
 * @card: MMC card to check BKOPS
 *
 * Send HPI command to stop ongoing background operations to
 * allow rapid servicing of foreground operations, e.g. read/
 * writes. Wait until the card comes out of the programming state
 * to avoid errors in servicing read/write requests.
 */
int mmc_stop_bkops(struct mmc_card *card)
{
	int err = 0;

	err = mmc_interrupt_hpi(card);

	/*
	 * If err is -EINVAL, an HPI cannot be issued in the card's current
	 * state; the card will complete the BKOPS on its own, so treat
	 * this as success.
	 */
	if (!err || (err == -EINVAL)) {
		mmc_card_clr_doing_bkops(card);
		mmc_retune_release(card->host);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(mmc_stop_bkops);

int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	mmc_claim_host(card->host);
	err = mmc_get_ext_csd(card, &ext_csd);
	mmc_release_host(card->host);
	if (err)
		return err;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
	kfree(ext_csd);
	return 0;
}
EXPORT_SYMBOL(mmc_read_bkops_status);

/**
 * mmc_set_data_timeout - set the timeout for a data command
 * @data: data phase for command
 * @card: the MMC card associated with the data transfer
 *
 * Computes the data timeout parameters according to the
 * correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
	unsigned int mult;

	/*
	 * SDIO cards only define an upper 1 s limit on access.
	 */
	if (mmc_card_sdio(card)) {
		data->timeout_ns = 1000000000;
		data->timeout_clks = 0;
		return;
	}

	/*
	 * SD cards use a 100 multiplier rather than 10
	 */
	mult = mmc_card_sd(card) ? 100 : 10;

	/*
	 * Scale up the multiplier (and therefore the timeout) by
	 * the r2w factor for writes.
	 */
	if (data->flags & MMC_DATA_WRITE)
		mult <<= card->csd.r2w_factor;

	data->timeout_ns = card->csd.tacc_ns * mult;
	data->timeout_clks = card->csd.tacc_clks * mult;

	/*
	 * SD cards also have an upper limit on the timeout.
	 */
	if (mmc_card_sd(card)) {
		unsigned int timeout_us, limit_us;

		timeout_us = data->timeout_ns / 1000;
		if (card->host->ios.clock)
			timeout_us += data->timeout_clks * 1000 /
				(card->host->ios.clock / 1000);

		if (data->flags & MMC_DATA_WRITE)
			/*
			 * The MMC spec says: "It is strongly recommended
			 * for hosts to implement more than 500ms
			 * timeout value even if the card indicates
			 * the 250ms maximum busy length." Even the
			 * previous value of 300ms is known to be
			 * insufficient for some cards.
			 */
			limit_us = 3000000;
		else
			limit_us = 100000;

		/*
		 * SDHC cards always use these fixed values.
		 */
		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
			data->timeout_ns = limit_us * 1000;
			data->timeout_clks = 0;
		}

		/* assign limit value if invalid */
		if (timeout_us == 0)
			data->timeout_ns = limit_us * 1000;
	}

	/*
	 * Some cards require longer data read timeout than indicated in CSD.
	 * Address this by setting the read timeout to a "reasonably high"
	 * value. For the cards tested, 600ms has proven enough. If necessary,
	 * this value can be increased if other problematic cards require this.
	 */
	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
		data->timeout_ns = 600000000;
		data->timeout_clks = 0;
	}

	/*
	 * Some cards need very high timeouts if driven in SPI mode.
	 * The worst observed timeout was 900ms after writing a
	 * continuous stream of data until the internal logic
	 * overflowed.
	 */
	if (mmc_host_is_spi(card->host)) {
		if (data->flags & MMC_DATA_WRITE) {
			if (data->timeout_ns < 1000000000)
				data->timeout_ns = 1000000000;	/* 1s */
		} else {
			if (data->timeout_ns < 100000000)
				data->timeout_ns = 100000000;	/* 100ms */
		}
	}
}
EXPORT_SYMBOL(mmc_set_data_timeout);

/**
 * mmc_align_data_size - pads a transfer size to a more optimal value
 * @card: the MMC card associated with the data transfer
 * @sz: original transfer size
 *
 * Pads the original data size with a number of extra bytes in
 * order to avoid controller bugs and/or performance hits
 * (e.g. some controllers revert to PIO for certain sizes).
 *
 * Returns the improved size, which might be unmodified.
 *
 * Note that this function is only relevant when issuing a
 * single scatter gather entry.
 */
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
	/*
	 * FIXME: We don't have a system for the controller to tell
	 * the core about its problems yet, so for now we just 32-bit
	 * align the size.
	 */
	sz = ((sz + 3) / 4) * 4;

	return sz;
}
EXPORT_SYMBOL(mmc_align_data_size);
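
/*
 * For instance, a 13-byte transfer is padded up to 16 bytes:
 * sz = ((13 + 3) / 4) * 4 = 16. Callers must be prepared to
 * transfer (and discard) the extra padding bytes.
 */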

/**
 * __mmc_claim_host - exclusively claim a host
 * @host: mmc host to claim
 * @abort: whether or not the operation should be aborted
 *
 * Claim a host for a set of operations. If @abort is non-NULL and
 * dereferences to a non-zero value, this will return prematurely with
 * that non-zero value without acquiring the lock. Otherwise it returns
 * zero with the lock held.
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;
	bool pm = false;

	might_sleep();

	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		stop = abort ? atomic_read(abort) : 0;
		if (stop || !host->claimed || host->claimer == current)
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	if (!stop) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
		if (host->claim_cnt == 1)
			pm = true;
	} else
		wake_up(&host->wq);
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);

	if (pm)
		pm_runtime_get_sync(mmc_dev(host));

	return stop;
}
EXPORT_SYMBOL(__mmc_claim_host);

/**
 * mmc_release_host - release a host
 * @host: mmc host to release
 *
 * Release a MMC host, allowing others to claim the host
 * for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
	unsigned long flags;

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);
	if (--host->claim_cnt) {
		/* Release for nested claim */
		spin_unlock_irqrestore(&host->lock, flags);
	} else {
		host->claimed = 0;
		host->claimer = NULL;
		spin_unlock_irqrestore(&host->lock, flags);
		wake_up(&host->wq);
		pm_runtime_mark_last_busy(mmc_dev(host));
		pm_runtime_put_autosuspend(mmc_dev(host));
	}
}
EXPORT_SYMBOL(mmc_release_host);

/*
 * This is a helper function, which fetches a runtime pm reference for the
 * card device and also claims the host.
 */
void mmc_get_card(struct mmc_card *card)
{
	pm_runtime_get_sync(&card->dev);
	mmc_claim_host(card->host);
}
EXPORT_SYMBOL(mmc_get_card);

/*
 * This is a helper function, which releases the host and drops the runtime
 * pm reference for the card device.
 */
void mmc_put_card(struct mmc_card *card)
{
	mmc_release_host(card->host);
	pm_runtime_mark_last_busy(&card->dev);
	pm_runtime_put_autosuspend(&card->dev);
}
EXPORT_SYMBOL(mmc_put_card);
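
/*
 * Example (hedged sketch): the usual pattern for code that needs to talk
 * to the card outside the regular request path, e.g. from an ioctl. The
 * example_get_status() helper is hypothetical.
 *
 *	mmc_get_card(card);		// runtime-resume card + claim host
 *	err = example_get_status(card, &status);
 *	mmc_put_card(card);		// release host + drop PM reference
 *
 * mmc_claim_host()/mmc_release_host() alone are appropriate when the
 * caller already holds a runtime PM reference for the card.
 */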

/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;

	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		"width %u timing %u\n",
		 mmc_hostname(host), ios->clock, ios->bus_mode,
		 ios->power_mode, ios->chip_select, ios->vdd,
		 1 << ios->bus_width, ios->timing);

	host->ops->set_ios(host, ios);
}

/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	host->ios.chip_select = mode;
	mmc_set_ios(host);
}

/*
 * Sets the host clock to the highest possible frequency that
 * is below "hz".
 */
void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	WARN_ON(hz && hz < host->f_min);

	if (hz > host->f_max)
		hz = host->f_max;

	host->ios.clock = hz;
	mmc_set_ios(host);
}

int mmc_execute_tuning(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u32 opcode;
	int err;

	if (!host->ops->execute_tuning)
		return 0;

	if (mmc_card_mmc(card))
		opcode = MMC_SEND_TUNING_BLOCK_HS200;
	else
		opcode = MMC_SEND_TUNING_BLOCK;

	err = host->ops->execute_tuning(host, opcode);

	if (err)
		pr_err("%s: tuning execution failed: %d\n",
			mmc_hostname(host), err);
	else
		mmc_retune_enable(host);

	return err;
}

/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
}

/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	host->ios.bus_width = width;
	mmc_set_ios(host);
}

/*
 * Set initial state after a power cycle or a hw_reset.
 */
void mmc_set_initial_state(struct mmc_host *host)
{
	mmc_retune_disable(host);

	if (mmc_host_is_spi(host))
		host->ios.chip_select = MMC_CS_HIGH;
	else
		host->ios.chip_select = MMC_CS_DONTCARE;
	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	host->ios.drv_type = 0;
	host->ios.enhanced_strobe = false;

	/*
	 * Make sure we are in non-enhanced strobe mode before we
	 * actually enable it in ext_csd.
	 */
	if ((host->caps2 & MMC_CAP2_HS400_ES) &&
	     host->ops->hs400_enhanced_strobe)
		host->ops->hs400_enhanced_strobe(host, &host->ios);

	mmc_set_ios(host);
}

/**
 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
 * @vdd: voltage (mV)
 * @low_bits: prefer low bits in boundary cases
 *
 * This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible, a negative errno value is returned.
 *
 * Depending on the @low_bits flag the function prefers low or high OCR bits
 * on boundary voltages. For example,
 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
 *
 * Any value in the [1951:1999] range translates to ilog2(MMC_VDD_20_21).
 */
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
	const int max_bit = ilog2(MMC_VDD_35_36);
	int bit;

	if (vdd < 1650 || vdd > 3600)
		return -EINVAL;

	if (vdd >= 1650 && vdd <= 1950)
		return ilog2(MMC_VDD_165_195);

	if (low_bits)
		vdd -= 1;

	/* Base 2000 mV, step 100 mV, bit's base 8. */
	bit = (vdd - 2000) / 100 + 8;
	if (bit > max_bit)
		return max_bit;
	return bit;
}

/**
 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
 * @vdd_min: minimum voltage value (mV)
 * @vdd_max: maximum voltage value (mV)
 *
 * This function returns the OCR mask bits according to the provided @vdd_min
 * and @vdd_max values. If conversion is not possible the function returns 0.
 *
 * Notes wrt boundary cases:
 * This function sets the OCR bits for all boundary voltages, for example
 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35 mask.
 */
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
	u32 mask = 0;

	if (vdd_max < vdd_min)
		return 0;

	/* Prefer high bits for the boundary vdd_max values. */
	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
	if (vdd_max < 0)
		return 0;

	/* Prefer low bits for the boundary vdd_min values. */
	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
	if (vdd_min < 0)
		return 0;

	/* Fill the mask, from max bit to min bit. */
	while (vdd_max >= vdd_min)
		mask |= 1 << vdd_max--;

	return mask;
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
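
/*
 * For example, mmc_vddrange_to_ocrmask(3300, 3400) sets bits 20..22,
 * i.e. MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35, while
 * mmc_vddrange_to_ocrmask(1650, 1950) yields just MMC_VDD_165_195.
 */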

#ifdef CONFIG_OF

/**
 * mmc_of_parse_voltage - return mask of supported voltages
 * @np: the device node to be parsed
 * @mask: mask of voltages available for MMC/SD/SDIO
 *
 * Parse the "voltage-ranges" DT property, returning zero if it is not
 * found, negative errno if the voltage-range specification is invalid,
 * or one if the voltage-range is specified and successfully parsed.
 */
int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
{
	const u32 *voltage_ranges;
	int num_ranges, i;

	voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
	num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
	if (!voltage_ranges) {
		pr_debug("%s: voltage-ranges unspecified\n", np->full_name);
		return 0;
	}
	if (!num_ranges) {
		pr_err("%s: voltage-ranges empty\n", np->full_name);
		return -EINVAL;
	}

	for (i = 0; i < num_ranges; i++) {
		const int j = i * 2;
		u32 ocr_mask;

		ocr_mask = mmc_vddrange_to_ocrmask(
				be32_to_cpu(voltage_ranges[j]),
				be32_to_cpu(voltage_ranges[j + 1]));
		if (!ocr_mask) {
			pr_err("%s: voltage-range #%d is invalid\n",
				np->full_name, i);
			return -EINVAL;
		}
		*mask |= ocr_mask;
	}

	return 1;
}
EXPORT_SYMBOL(mmc_of_parse_voltage);
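
/*
 * Example (hedged sketch): a host driver probe parsing a DT node such as
 *
 *	voltage-ranges = <3300 3400>;
 *
 * into its OCR mask. The my_parse_dt() wrapper is hypothetical.
 *
 *	static int my_parse_dt(struct mmc_host *mmc, struct device_node *np)
 *	{
 *		u32 mask = 0;
 *		int ret = mmc_of_parse_voltage(np, &mask);
 *
 *		if (ret < 0)
 *			return ret;	// malformed voltage-ranges
 *		if (ret == 1)
 *			mmc->ocr_avail = mask;
 *		return 0;		// ret == 0: property absent
 *	}
 */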

#endif /* CONFIG_OF */

static int mmc_of_get_func_num(struct device_node *node)
{
	u32 reg;
	int ret;

	ret = of_property_read_u32(node, "reg", &reg);
	if (ret < 0)
		return ret;

	return reg;
}

struct device_node *mmc_of_find_child_device(struct mmc_host *host,
		unsigned func_num)
{
	struct device_node *node;

	if (!host->parent || !host->parent->of_node)
		return NULL;

	for_each_child_of_node(host->parent->of_node, node) {
		if (mmc_of_get_func_num(node) == func_num)
			return node;
	}

	return NULL;
}

#ifdef CONFIG_REGULATOR

/**
 * mmc_ocrbitnum_to_vdd - Convert an OCR bit number to its voltage
 * @vdd_bit: OCR bit number
 * @min_uV: minimum voltage value (µV)
 * @max_uV: maximum voltage value (µV)
 *
 * This function returns the voltage range according to the provided OCR
 * bit number. If conversion is not possible, a negative errno value is
 * returned.
 */
static int mmc_ocrbitnum_to_vdd(int vdd_bit, int *min_uV, int *max_uV)
{
	int tmp;

	if (!vdd_bit)
		return -EINVAL;

	/*
	 * REVISIT mmc_vddrange_to_ocrmask() may have set some
	 * bits this regulator doesn't quite support ... don't
	 * be too picky, most cards and regulators are OK with
	 * a 0.1V range goof (it's a small error percentage).
	 */
	tmp = vdd_bit - ilog2(MMC_VDD_165_195);
	if (tmp == 0) {
		*min_uV = 1650 * 1000;
		*max_uV = 1950 * 1000;
	} else {
		*min_uV = 1900 * 1000 + tmp * 100 * 1000;
		*max_uV = *min_uV + 100 * 1000;
	}

	return 0;
}

/**
 * mmc_regulator_get_ocrmask - return mask of supported voltages
 * @supply: regulator to use
 *
 * This returns either a negative errno, or a mask of voltages that
 * can be provided to MMC/SD/SDIO devices using the specified voltage
 * regulator. This would normally be called before registering the
 * MMC host adapter.
 */
int mmc_regulator_get_ocrmask(struct regulator *supply)
{
	int result = 0;
	int count;
	int i;
	int vdd_uV;
	int vdd_mV;

	count = regulator_count_voltages(supply);
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		vdd_uV = regulator_list_voltage(supply, i);
		if (vdd_uV <= 0)
			continue;

		vdd_mV = vdd_uV / 1000;
		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	if (!result) {
		vdd_uV = regulator_get_voltage(supply);
		if (vdd_uV <= 0)
			return vdd_uV;

		vdd_mV = vdd_uV / 1000;
		result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);

/**
 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
 * @mmc: the host to regulate
 * @supply: regulator to use
 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 *
 * Returns zero on success, else negative errno.
 *
 * MMC host drivers may use this to enable or disable a regulator using
 * a particular supply voltage. This would normally be called from the
 * set_ios() method.
 */
int mmc_regulator_set_ocr(struct mmc_host *mmc,
			struct regulator *supply,
			unsigned short vdd_bit)
{
	int result = 0;
	int min_uV, max_uV;

	if (vdd_bit) {
		mmc_ocrbitnum_to_vdd(vdd_bit, &min_uV, &max_uV);

		result = regulator_set_voltage(supply, min_uV, max_uV);
		if (result == 0 && !mmc->regulator_enabled) {
			result = regulator_enable(supply);
			if (!result)
				mmc->regulator_enabled = true;
		}
	} else if (mmc->regulator_enabled) {
		result = regulator_disable(supply);
		if (result == 0)
			mmc->regulator_enabled = false;
	}

	if (result)
		dev_err(mmc_dev(mmc),
			"could not set regulator OCR (%d)\n", result);
	return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);

static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator,
						  int min_uV, int target_uV,
						  int max_uV)
{
	/*
	 * Check if supported first to avoid errors since we may try several
	 * signal levels during power up and don't want to show errors.
	 */
	if (!regulator_is_supported_voltage(regulator, min_uV, max_uV))
		return -EINVAL;

	return regulator_set_voltage_triplet(regulator, min_uV, target_uV,
					     max_uV);
}

/**
 * mmc_regulator_set_vqmmc - Set VQMMC as per the ios
 *
 * For 3.3V signaling, we try to match VQMMC to VMMC as closely as possible.
 * That will match the behavior of old boards where VQMMC and VMMC were supplied
 * by the same supply. The Bus Operating conditions for 3.3V signaling in the
 * SD card spec also define VQMMC in terms of VMMC.
 * If this is not possible we'll try the full 2.7-3.6V of the spec.
 *
 * For 1.2V and 1.8V signaling we'll try to get as close as possible to the
 * requested voltage. This is definitely a good idea for UHS where there's a
 * separate regulator on the card that's trying to make 1.8V and it's best if
 * we match.
 *
 * This function is expected to be used by a controller's
 * start_signal_voltage_switch() function.
 */
int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct device *dev = mmc_dev(mmc);
	int ret, volt, min_uV, max_uV;

	/* If no vqmmc supply then we can't change the voltage */
	if (IS_ERR(mmc->supply.vqmmc))
		return -EINVAL;

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_120:
		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
						1100000, 1200000, 1300000);
	case MMC_SIGNAL_VOLTAGE_180:
		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
						1700000, 1800000, 1950000);
	case MMC_SIGNAL_VOLTAGE_330:
		ret = mmc_ocrbitnum_to_vdd(mmc->ios.vdd, &volt, &max_uV);
		if (ret < 0)
			return ret;

		dev_dbg(dev, "%s: found vmmc voltage range of %d-%duV\n",
			__func__, volt, max_uV);

		min_uV = max(volt - 300000, 2700000);
		max_uV = min(max_uV + 200000, 3600000);

		/*
		 * Due to a limitation in the current implementation of
		 * regulator_set_voltage_triplet() which is taking the lowest
		 * voltage possible if below the target, search for a suitable
		 * voltage in two steps and try to stay close to vmmc
		 * with a 0.3V tolerance at first.
		 */
		if (!mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
						min_uV, volt, max_uV))
			return 0;

		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
						2700000, volt, 3600000);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);

#endif /* CONFIG_REGULATOR */

int mmc_regulator_get_supply(struct mmc_host *mmc)
{
	struct device *dev = mmc_dev(mmc);
	int ret;

	mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
	mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");

	if (IS_ERR(mmc->supply.vmmc)) {
		if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_dbg(dev, "No vmmc regulator found\n");
	} else {
		ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
		if (ret > 0)
			mmc->ocr_avail = ret;
		else
			dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
	}

	if (IS_ERR(mmc->supply.vqmmc)) {
		if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_dbg(dev, "No vqmmc regulator found\n");
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
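
/*
 * Example (hedged sketch): typical use from a host driver's probe(), where
 * vmmc/vqmmc come from the board's device tree. The OCR fallback value is
 * illustrative only.
 *
 *	ret = mmc_regulator_get_supply(mmc);
 *	if (ret)
 *		return ret;		// usually -EPROBE_DEFER
 *	if (!mmc->ocr_avail)
 *		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;	// fallback
 */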

/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
	int bit;

	/*
	 * Sanity check the voltages that the card claims to
	 * support.
	 */
	if (ocr & 0x7F) {
		dev_warn(mmc_dev(host),
		"card claims to support voltages below defined range\n");
		ocr &= ~0x7F;
	}

	ocr &= host->ocr_avail;
	if (!ocr) {
		dev_warn(mmc_dev(host), "no support for card's volts\n");
		return 0;
	}

	if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
		bit = ffs(ocr) - 1;
		ocr &= 3 << bit;
		mmc_power_cycle(host, ocr);
	} else {
		bit = fls(ocr) - 1;
		ocr &= 3 << bit;
		if (bit != host->ios.vdd)
			dev_warn(mmc_dev(host), "exceeding card's volts\n");
	}

	return ocr;
}

int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
{
	int err = 0;
	int old_signal_voltage = host->ios.signal_voltage;

	host->ios.signal_voltage = signal_voltage;
	if (host->ops->start_signal_voltage_switch)
		err = host->ops->start_signal_voltage_switch(host, &host->ios);

	if (err)
		host->ios.signal_voltage = old_signal_voltage;

	return err;

}

int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
{
	struct mmc_command cmd = {};
	int err = 0;
	u32 clock;

	/*
	 * If we cannot switch voltages, return failure so the caller
	 * can continue without UHS mode
	 */
	if (!host->ops->start_signal_voltage_switch)
		return -EPERM;
	if (!host->ops->card_busy)
		pr_warn("%s: cannot verify signal voltage switch\n",
			mmc_hostname(host));

	cmd.opcode = SD_SWITCH_VOLTAGE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err)
		return err;

	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
		return -EIO;

	/*
	 * The card should drive cmd and dat[0:3] low immediately
	 * after the response of cmd11, but wait 1 ms to be sure
	 */
	mmc_delay(1);
	if (host->ops->card_busy && !host->ops->card_busy(host)) {
		err = -EAGAIN;
		goto power_cycle;
	}
	/*
	 * During a signal voltage level switch, the clock must be gated
	 * for 5 ms according to the SD spec
	 */
	clock = host->ios.clock;
	host->ios.clock = 0;
	mmc_set_ios(host);

	if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180)) {
		/*
		 * Voltages may not have been switched, but we've already
		 * sent CMD11, so a power cycle is required anyway
		 */
		err = -EAGAIN;
		goto power_cycle;
	}

	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
	mmc_delay(10);
	host->ios.clock = clock;
	mmc_set_ios(host);

	/* Wait for at least 1 ms according to spec */
	mmc_delay(1);

	/*
	 * Failure to switch is indicated by the card holding
	 * dat[0:3] low
	 */
	if (host->ops->card_busy && host->ops->card_busy(host))
		err = -EAGAIN;

power_cycle:
	if (err) {
		pr_debug("%s: Signal voltage switch failed, "
			"power cycling card\n", mmc_hostname(host));
		mmc_power_cycle(host, ocr);
	}

	return err;
}

/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
	host->ios.timing = timing;
	mmc_set_ios(host);
}

/*
 * Select appropriate driver type for host.
 */
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
	host->ios.drv_type = drv_type;
	mmc_set_ios(host);
}

int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
			      int card_drv_type, int *drv_type)
{
	struct mmc_host *host = card->host;
	int host_drv_type = SD_DRIVER_TYPE_B;

	*drv_type = 0;

	if (!host->ops->select_drive_strength)
		return 0;

	/* Use SD definition of driver strength for hosts */
	if (host->caps & MMC_CAP_DRIVER_TYPE_A)
		host_drv_type |= SD_DRIVER_TYPE_A;

	if (host->caps & MMC_CAP_DRIVER_TYPE_C)
		host_drv_type |= SD_DRIVER_TYPE_C;

	if (host->caps & MMC_CAP_DRIVER_TYPE_D)
		host_drv_type |= SD_DRIVER_TYPE_D;

	/*
	 * The drive strength that the hardware can support
	 * depends on the board design. Pass the appropriate
	 * information and let the hardware specific code
	 * return what is possible given the options
	 */
	return host->ops->select_drive_strength(card, max_dtr,
						host_drv_type,
						card_drv_type,
						drv_type);
}
/*
 * Apply power to the MMC stack. This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise. Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
void mmc_power_up(struct mmc_host *host, u32 ocr)
{
	if (host->ios.power_mode == MMC_POWER_ON)
		return;

	mmc_pwrseq_pre_power_on(host);

	host->ios.vdd = fls(ocr) - 1;
	host->ios.power_mode = MMC_POWER_UP;
	/* Set initial state and call mmc_set_ios */
	mmc_set_initial_state(host);

	/* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */
	if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330))
		dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
	else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
	else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120))
		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");

	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage.
	 */
	mmc_delay(10);

	mmc_pwrseq_post_power_on(host);

	host->ios.clock = host->f_init;

	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	/*
	 * This delay must be at least 74 clock cycles, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
	mmc_delay(10);
}

void mmc_power_off(struct mmc_host *host)
{
	if (host->ios.power_mode == MMC_POWER_OFF)
		return;

	mmc_pwrseq_power_off(host);

	host->ios.clock = 0;
	host->ios.vdd = 0;

	host->ios.power_mode = MMC_POWER_OFF;
	/* Set initial state and call mmc_set_ios */
	mmc_set_initial_state(host);

	/*
	 * Some configurations, such as the 802.11 SDIO card in the OLPC
	 * XO-1.5, require a short delay after poweroff before the card
	 * can be successfully turned on again.
	 */
	mmc_delay(1);
}

void mmc_power_cycle(struct mmc_host *host, u32 ocr)
{
	mmc_power_off(host);
	/* Wait at least 1 ms according to SD spec */
	mmc_delay(1);
	mmc_power_up(host, ocr);
}

/*
 * Cleanup when the last reference to the bus operator is dropped.
 */
static void __mmc_release_bus(struct mmc_host *host)
{
	WARN_ON(!host->bus_dead);

	host->bus_ops = NULL;
}

/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs++;
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs--;
	if ((host->bus_refs == 0) && host->bus_ops)
		__mmc_release_bus(host);
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Assign a mmc bus handler to a host. Only one bus handler may control a
 * host at any given time.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
	unsigned long flags;

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);

	WARN_ON(host->bus_ops);
	WARN_ON(host->bus_refs);

	host->bus_ops = ops;
	host->bus_refs = 1;
	host->bus_dead = 0;

	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Remove the current bus handler from a host.
 */
void mmc_detach_bus(struct mmc_host *host)
{
	unsigned long flags;

	WARN_ON(!host->claimed);
	WARN_ON(!host->bus_ops);

	spin_lock_irqsave(&host->lock, flags);

	host->bus_dead = 1;

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_bus_put(host);
}

static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
				bool cd_irq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	WARN_ON(host->removed);
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	/*
	 * If the device is configured as wakeup, we prevent a new sleep for
	 * 5 s to give provision for user space to consume the event.
	 */
	if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) &&
	    device_can_wakeup(mmc_dev(host)))
		pm_wakeup_event(mmc_dev(host), 5000);

	host->detect_change = 1;
	mmc_schedule_delayed_work(&host->detect, delay);
}

/**
 * mmc_detect_change - process change of state on a MMC socket
 * @host: host which changed state.
 * @delay: optional delay to wait before detection (jiffies)
 *
 * MMC drivers should call this when they detect a card has been
 * inserted or removed. The MMC layer will confirm that any
 * present card is still functional, and initialize any newly
 * inserted card.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
	_mmc_detect_change(host, delay, true);
}
EXPORT_SYMBOL(mmc_detect_change);
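
/*
 * Example (hedged sketch): a card-detect GPIO interrupt handler would
 * debounce and then kick the core's rescan machinery. my_cd_irq() is
 * hypothetical; the 200 ms debounce value is illustrative.
 *
 *	static irqreturn_t my_cd_irq(int irq, void *dev_id)
 *	{
 *		struct mmc_host *mmc = dev_id;
 *
 *		// 200 ms debounce before probing the slot
 *		mmc_detect_change(mmc, msecs_to_jiffies(200));
 *		return IRQ_HANDLED;
 *	}
 */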

void mmc_init_erase(struct mmc_card *card)
{
	unsigned int sz;

	if (is_power_of_2(card->erase_size))
		card->erase_shift = ffs(card->erase_size) - 1;
	else
		card->erase_shift = 0;

	/*
	 * It is possible to erase an arbitrarily large area of an SD or MMC
	 * card. That is not desirable because it can take a long time
	 * (minutes) potentially delaying more important I/O, and also the
	 * timeout calculations become increasingly over-estimated.
	 * Consequently, 'pref_erase' is defined as a guide to limit erases
	 * to that size and alignment.
	 *
	 * For SD cards that define Allocation Unit size, limit erases to one
	 * Allocation Unit at a time.
	 * For MMC, have a stab at a good value; for modern cards it will
	 * end up being 4MiB. Note that if the value is too small, it can end
	 * up taking longer to erase. Also note, erase_size is already set to
	 * High Capacity Erase Size if available when this function is called.
	 */
2017 if (mmc_card_sd(card) && card->ssr.au) {
2018 card->pref_erase = card->ssr.au;
2019 card->erase_shift = ffs(card->ssr.au) - 1;
2020 } else if (card->erase_size) {
2021 sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
2022 if (sz < 128)
2023 card->pref_erase = 512 * 1024 / 512;
2024 else if (sz < 512)
2025 card->pref_erase = 1024 * 1024 / 512;
2026 else if (sz < 1024)
2027 card->pref_erase = 2 * 1024 * 1024 / 512;
2028 else
2029 card->pref_erase = 4 * 1024 * 1024 / 512;
2030 if (card->pref_erase < card->erase_size)
2031 card->pref_erase = card->erase_size;
2032 else {
2033 sz = card->pref_erase % card->erase_size;
2034 if (sz)
2035 card->pref_erase += card->erase_size - sz;
2036 }
2037 } else
2038 card->pref_erase = 0;
2039}
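
/*
 * Worked example (illustrative numbers, assuming an 8 GiB eMMC): 'sz' above
 * is the card size in MiB (512-byte sectors >> 11), so sz = 8192 >= 1024 and
 * pref_erase becomes 4 MiB expressed in sectors:
 *
 *        pref_erase = 4 * 1024 * 1024 / 512 = 8192 sectors
 *
 * If erase_size were, say, 3072 sectors, the trailing fixup would round
 * pref_erase up to the next multiple, 9216 sectors.
 */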

static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
                                          unsigned int arg, unsigned int qty)
{
        unsigned int erase_timeout;

        if (arg == MMC_DISCARD_ARG ||
            (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
                erase_timeout = card->ext_csd.trim_timeout;
        } else if (card->ext_csd.erase_group_def & 1) {
                /* High Capacity Erase Group Size uses HC timeouts */
                if (arg == MMC_TRIM_ARG)
                        erase_timeout = card->ext_csd.trim_timeout;
                else
                        erase_timeout = card->ext_csd.hc_erase_timeout;
        } else {
                /* CSD Erase Group Size uses write timeout */
                unsigned int mult = (10 << card->csd.r2w_factor);
                unsigned int timeout_clks = card->csd.tacc_clks * mult;
                unsigned int timeout_us;

                /* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
                if (card->csd.tacc_ns < 1000000)
                        timeout_us = (card->csd.tacc_ns * mult) / 1000;
                else
                        timeout_us = (card->csd.tacc_ns / 1000) * mult;

                /*
                 * ios.clock is only a target. The real clock rate might be
                 * less but not that much less, so fudge it by multiplying by 2.
                 */
                timeout_clks <<= 1;
                timeout_us += (timeout_clks * 1000) /
                              (card->host->ios.clock / 1000);

                erase_timeout = timeout_us / 1000;

                /*
                 * Theoretically, the calculation could underflow so round up
                 * to 1 ms in that case.
                 */
                if (!erase_timeout)
                        erase_timeout = 1;
        }

        /* Multiplier for secure operations */
        if (arg & MMC_SECURE_ARGS) {
                if (arg == MMC_SECURE_ERASE_ARG)
                        erase_timeout *= card->ext_csd.sec_erase_mult;
                else
                        erase_timeout *= card->ext_csd.sec_trim_mult;
        }

        erase_timeout *= qty;

        /*
         * Ensure at least a 1 second timeout for SPI as per
         * 'mmc_set_data_timeout()'
         */
        if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
                erase_timeout = 1000;

        return erase_timeout;
}
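
/*
 * Worked example of the CSD path above (illustrative numbers only): with
 * tacc_ns = 1000000 and r2w_factor = 4, mult = 10 << 4 = 160 and
 * timeout_us = (1000000 / 1000) * 160 = 160000, so each erase group
 * contributes roughly 160 ms before the clock-based term and the qty
 * multiplier are applied.
 */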

static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
                                         unsigned int arg,
                                         unsigned int qty)
{
        unsigned int erase_timeout;

        if (card->ssr.erase_timeout) {
                /* Erase timeout specified in SD Status Register (SSR) */
                erase_timeout = card->ssr.erase_timeout * qty +
                                card->ssr.erase_offset;
        } else {
                /*
                 * Erase timeout not specified in SD Status Register (SSR) so
                 * use 250 ms per write block.
                 */
                erase_timeout = 250 * qty;
        }

        /* Must not be less than 1 second */
        if (erase_timeout < 1000)
                erase_timeout = 1000;

        return erase_timeout;
}

static unsigned int mmc_erase_timeout(struct mmc_card *card,
                                      unsigned int arg,
                                      unsigned int qty)
{
        if (mmc_card_sd(card))
                return mmc_sd_erase_timeout(card, arg, qty);
        else
                return mmc_mmc_erase_timeout(card, arg, qty);
}

static int mmc_do_erase(struct mmc_card *card, unsigned int from,
                        unsigned int to, unsigned int arg)
{
        struct mmc_command cmd = {};
        unsigned int qty = 0, busy_timeout = 0;
        bool use_r1b_resp = false;
        unsigned long timeout;
        int err;

        mmc_retune_hold(card->host);

        /*
         * qty is used to calculate the erase timeout which depends on how many
         * erase groups (or allocation units in SD terminology) are affected.
         * We count erasing part of an erase group as one erase group.
         * For SD, the allocation units are always a power of 2. For MMC, the
         * erase group size is almost certainly also a power of 2, but the
         * JEDEC standard does not seem to insist on that, so we fall back to
         * division in that case. SD may not specify an allocation unit size,
         * in which case the timeout is based on the number of write blocks.
         *
         * Note that the timeout for secure trim 2 will only be correct if the
         * number of erase groups specified is the same as the total of all
         * preceding secure trim 1 commands. Since the power may have been
         * lost since the secure trim 1 commands occurred, it is generally
         * impossible to calculate the secure trim 2 timeout correctly.
         */
        if (card->erase_shift)
                qty += ((to >> card->erase_shift) -
                        (from >> card->erase_shift)) + 1;
        else if (mmc_card_sd(card))
                qty += to - from + 1;
        else
                qty += ((to / card->erase_size) -
                        (from / card->erase_size)) + 1;

        if (!mmc_card_blockaddr(card)) {
                from <<= 9;
                to <<= 9;
        }

        if (mmc_card_sd(card))
                cmd.opcode = SD_ERASE_WR_BLK_START;
        else
                cmd.opcode = MMC_ERASE_GROUP_START;
        cmd.arg = from;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
        err = mmc_wait_for_cmd(card->host, &cmd, 0);
        if (err) {
                pr_err("mmc_erase: group start error %d, status %#x\n",
                       err, cmd.resp[0]);
                err = -EIO;
                goto out;
        }

        memset(&cmd, 0, sizeof(struct mmc_command));
        if (mmc_card_sd(card))
                cmd.opcode = SD_ERASE_WR_BLK_END;
        else
                cmd.opcode = MMC_ERASE_GROUP_END;
        cmd.arg = to;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
        err = mmc_wait_for_cmd(card->host, &cmd, 0);
        if (err) {
                pr_err("mmc_erase: group end error %d, status %#x\n",
                       err, cmd.resp[0]);
                err = -EIO;
                goto out;
        }

        memset(&cmd, 0, sizeof(struct mmc_command));
        cmd.opcode = MMC_ERASE;
        cmd.arg = arg;
        busy_timeout = mmc_erase_timeout(card, arg, qty);
        /*
         * If the host controller supports busy signalling and the timeout for
         * the erase operation does not exceed the max_busy_timeout, we should
         * use R1B response. Otherwise we need to prevent the host from doing
         * hw busy detection, which is done by converting to a R1 response
         * instead.
         */
        if (card->host->max_busy_timeout &&
            busy_timeout > card->host->max_busy_timeout) {
                cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
        } else {
                cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
                cmd.busy_timeout = busy_timeout;
                use_r1b_resp = true;
        }

        err = mmc_wait_for_cmd(card->host, &cmd, 0);
        if (err) {
                pr_err("mmc_erase: erase error %d, status %#x\n",
                       err, cmd.resp[0]);
                err = -EIO;
                goto out;
        }

        if (mmc_host_is_spi(card->host))
                goto out;

        /*
         * When R1B + MMC_CAP_WAIT_WHILE_BUSY is used, polling must be
         * avoided.
         */
        if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
                goto out;

        timeout = jiffies + msecs_to_jiffies(busy_timeout);
        do {
                memset(&cmd, 0, sizeof(struct mmc_command));
                cmd.opcode = MMC_SEND_STATUS;
                cmd.arg = card->rca << 16;
                cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
                /* Do not retry else we can't see errors */
                err = mmc_wait_for_cmd(card->host, &cmd, 0);
                if (err || (cmd.resp[0] & 0xFDF92000)) {
                        pr_err("error %d requesting status %#x\n",
                               err, cmd.resp[0]);
                        err = -EIO;
                        goto out;
                }

                /*
                 * Timeout if the device never becomes ready for data and
                 * never leaves the program state.
                 */
                if (time_after(jiffies, timeout)) {
                        pr_err("%s: Card stuck in programming state! %s\n",
                               mmc_hostname(card->host), __func__);
                        err = -EIO;
                        goto out;
                }

        } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
                 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
out:
        mmc_retune_release(card->host);
        return err;
}

static unsigned int mmc_align_erase_size(struct mmc_card *card,
                                         unsigned int *from,
                                         unsigned int *to,
                                         unsigned int nr)
{
        unsigned int from_new = *from, nr_new = nr, rem;

        /*
         * When 'card->erase_size' is a power of 2, we can use round_up/down()
         * to align the erase size efficiently.
         */
        if (is_power_of_2(card->erase_size)) {
                unsigned int temp = from_new;

                from_new = round_up(temp, card->erase_size);
                rem = from_new - temp;

                if (nr_new > rem)
                        nr_new -= rem;
                else
                        return 0;

                nr_new = round_down(nr_new, card->erase_size);
        } else {
                rem = from_new % card->erase_size;
                if (rem) {
                        rem = card->erase_size - rem;
                        from_new += rem;
                        if (nr_new > rem)
                                nr_new -= rem;
                        else
                                return 0;
                }

                rem = nr_new % card->erase_size;
                if (rem)
                        nr_new -= rem;
        }

        if (nr_new == 0)
                return 0;

        *to = from_new + nr_new;
        *from = from_new;

        return nr_new;
}
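
/*
 * Worked example (illustrative numbers): with erase_size = 1024 sectors,
 * *from = 1000 and nr = 3000, the power-of-2 path gives
 * from_new = round_up(1000, 1024) = 1024, rem = 24, nr_new = 2976, then
 * round_down(2976, 1024) = 2048, so the aligned range is [1024, 3072)
 * and 2048 is returned.
 */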

/**
 * mmc_erase - erase sectors.
 * @card: card to erase
 * @from: first sector to erase
 * @nr: number of sectors to erase
 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
 *
 * Caller must claim host before calling this function.
 */
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
              unsigned int arg)
{
        unsigned int rem, to = from + nr;
        int err;

        if (!(card->host->caps & MMC_CAP_ERASE) ||
            !(card->csd.cmdclass & CCC_ERASE))
                return -EOPNOTSUPP;

        if (!card->erase_size)
                return -EOPNOTSUPP;

        if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
                return -EOPNOTSUPP;

        if ((arg & MMC_SECURE_ARGS) &&
            !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
                return -EOPNOTSUPP;

        if ((arg & MMC_TRIM_ARGS) &&
            !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
                return -EOPNOTSUPP;

        if (arg == MMC_SECURE_ERASE_ARG) {
                if (from % card->erase_size || nr % card->erase_size)
                        return -EINVAL;
        }

        if (arg == MMC_ERASE_ARG)
                nr = mmc_align_erase_size(card, &from, &to, nr);

        if (nr == 0)
                return 0;

        if (to <= from)
                return -EINVAL;

        /* 'from' and 'to' are inclusive */
        to -= 1;

        /*
         * Special case where only one erase-group fits in the timeout budget:
         * If the region crosses an erase-group boundary in this particular
         * case, we will be trimming more than one erase-group which does not
         * fit in the timeout budget of the controller, so we need to split it
         * and call mmc_do_erase() twice if necessary. This special case is
         * identified by the card->eg_boundary flag.
         */
        rem = card->erase_size - (from % card->erase_size);
        if ((arg & MMC_TRIM_ARGS) && (card->eg_boundary) && (nr > rem)) {
                err = mmc_do_erase(card, from, from + rem - 1, arg);
                from += rem;
                if ((err) || (to <= from))
                        return err;
        }

        return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);
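
/*
 * Illustrative usage sketch (not part of this file): a caller such as a
 * block driver would claim the host, pick the strongest supported argument
 * and then erase; "card", "from" and "nr" are assumed to exist.
 *
 *        unsigned int arg = mmc_can_trim(card) ? MMC_TRIM_ARG : MMC_ERASE_ARG;
 *
 *        mmc_claim_host(card->host);
 *        err = mmc_erase(card, from, nr, arg);
 *        mmc_release_host(card->host);
 */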

int mmc_can_erase(struct mmc_card *card)
{
        if ((card->host->caps & MMC_CAP_ERASE) &&
            (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
                return 1;
        return 0;
}
EXPORT_SYMBOL(mmc_can_erase);

int mmc_can_trim(struct mmc_card *card)
{
        if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) &&
            (!(card->quirks & MMC_QUIRK_TRIM_BROKEN)))
                return 1;
        return 0;
}
EXPORT_SYMBOL(mmc_can_trim);

int mmc_can_discard(struct mmc_card *card)
{
        /*
         * As there's no way to detect the discard support bit at v4.5,
         * use the s/w feature support field.
         */
        if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
                return 1;
        return 0;
}
EXPORT_SYMBOL(mmc_can_discard);

int mmc_can_sanitize(struct mmc_card *card)
{
        if (!mmc_can_trim(card) && !mmc_can_erase(card))
                return 0;
        if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
                return 1;
        return 0;
}
EXPORT_SYMBOL(mmc_can_sanitize);

int mmc_can_secure_erase_trim(struct mmc_card *card)
{
        if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
            !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
                return 1;
        return 0;
}
EXPORT_SYMBOL(mmc_can_secure_erase_trim);

int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
                            unsigned int nr)
{
        if (!card->erase_size)
                return 0;
        if (from % card->erase_size || nr % card->erase_size)
                return 0;
        return 1;
}
EXPORT_SYMBOL(mmc_erase_group_aligned);

static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
                                            unsigned int arg)
{
        struct mmc_host *host = card->host;
        unsigned int max_discard, x, y, qty = 0, max_qty, min_qty, timeout;
        unsigned int last_timeout = 0;
        unsigned int max_busy_timeout = host->max_busy_timeout ?
                        host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS;

        if (card->erase_shift) {
                max_qty = UINT_MAX >> card->erase_shift;
                min_qty = card->pref_erase >> card->erase_shift;
        } else if (mmc_card_sd(card)) {
                max_qty = UINT_MAX;
                min_qty = card->pref_erase;
        } else {
                max_qty = UINT_MAX / card->erase_size;
                min_qty = card->pref_erase / card->erase_size;
        }

        /*
         * We should not use 'host->max_busy_timeout' as the only limit when
         * deciding the max discard sectors. We should aim for a balanced
         * value that improves the erase speed without incurring an
         * excessively long timeout.
         *
         * Here we set 'card->pref_erase' as the minimal discard sectors
         * regardless of the size of 'host->max_busy_timeout', but if
         * 'host->max_busy_timeout' is large enough for more discard sectors,
         * then we can continue to increase the max discard sectors until we
         * get a balanced value. In cases when 'host->max_busy_timeout'
         * isn't specified, use the default max erase timeout.
         */
        do {
                y = 0;
                for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
                        timeout = mmc_erase_timeout(card, arg, qty + x);

                        if (qty + x > min_qty && timeout > max_busy_timeout)
                                break;

                        if (timeout < last_timeout)
                                break;
                        last_timeout = timeout;
                        y = x;
                }
                qty += y;
        } while (y);

        if (!qty)
                return 0;

        /*
         * When specifying a sector range to trim, chances are we might cross
         * an erase-group boundary even if the amount of sectors is less than
         * one erase-group.
         * If we can only fit one erase-group in the controller timeout budget,
         * we have to care that erase-group boundaries are not crossed by a
         * single trim operation. We flag that special case with "eg_boundary".
         * In all other cases we can just decrement qty and pretend that we
         * always touch (qty + 1) erase-groups as a simple optimization.
         */
        if (qty == 1)
                card->eg_boundary = 1;
        else
                qty--;

        /* Convert qty to sectors */
        if (card->erase_shift)
                max_discard = qty << card->erase_shift;
        else if (mmc_card_sd(card))
                max_discard = qty + 1;
        else
                max_discard = qty * card->erase_size;

        return max_discard;
}

unsigned int mmc_calc_max_discard(struct mmc_card *card)
{
        struct mmc_host *host = card->host;
        unsigned int max_discard, max_trim;

        /*
         * Without erase_group_def set, MMC erase timeout depends on clock
         * frequency which can change. In that case, the best choice is
         * just the preferred erase size.
         */
        if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
                return card->pref_erase;

        max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
        if (mmc_can_trim(card)) {
                max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
                if (max_trim < max_discard)
                        max_discard = max_trim;
        } else if (max_discard < card->erase_size) {
                max_discard = 0;
        }
        pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
                 mmc_hostname(host), max_discard, host->max_busy_timeout ?
                 host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS);
        return max_discard;
}
EXPORT_SYMBOL(mmc_calc_max_discard);
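
/*
 * Illustrative usage sketch (an assumption, not taken from this file): a
 * block driver can feed the result into its request queue's discard limit;
 * "q" is a hypothetical request queue.
 *
 *        unsigned int max_discard = mmc_calc_max_discard(card);
 *
 *        if (max_discard)
 *                blk_queue_max_discard_sectors(q, max_discard);
 */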

bool mmc_card_is_blockaddr(struct mmc_card *card)
{
        return card ? mmc_card_blockaddr(card) : false;
}
EXPORT_SYMBOL(mmc_card_is_blockaddr);

int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
        struct mmc_command cmd = {};

        if (mmc_card_blockaddr(card) || mmc_card_ddr52(card) ||
            mmc_card_hs400(card) || mmc_card_hs400es(card))
                return 0;

        cmd.opcode = MMC_SET_BLOCKLEN;
        cmd.arg = blocklen;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
        return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blocklen);

int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
                       bool is_rel_write)
{
        struct mmc_command cmd = {};

        cmd.opcode = MMC_SET_BLOCK_COUNT;
        cmd.arg = blockcount & 0x0000FFFF;
        if (is_rel_write)
                cmd.arg |= 1 << 31;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
        return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blockcount);
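
/*
 * Illustrative sketch (an assumption): a driver issuing a pre-defined
 * multi-block transfer would send CMD23 first, optionally flagging a
 * reliable write; "nr_blocks" is a made-up variable.
 *
 *        err = mmc_set_blockcount(card, nr_blocks, false);
 *        if (!err)
 *                ... issue the multi-block read/write request ...
 */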

static void mmc_hw_reset_for_init(struct mmc_host *host)
{
        if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
                return;
        host->ops->hw_reset(host);
}

int mmc_hw_reset(struct mmc_host *host)
{
        int ret;

        if (!host->card)
                return -EINVAL;

        mmc_bus_get(host);
        if (!host->bus_ops || host->bus_dead || !host->bus_ops->reset) {
                mmc_bus_put(host);
                return -EOPNOTSUPP;
        }

        ret = host->bus_ops->reset(host);
        mmc_bus_put(host);

        if (ret)
                pr_warn("%s: tried to reset card, got error %d\n",
                        mmc_hostname(host), ret);

        return ret;
}
EXPORT_SYMBOL(mmc_hw_reset);
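
/*
 * Illustrative usage sketch (an assumption): an upper layer's error
 * recovery path can attempt a full card re-initialization like this,
 * falling back when the host or bus does not support reset.
 *
 *        int err = mmc_hw_reset(host);
 *
 *        if (err == -EOPNOTSUPP)
 *                ... reset not available, try other recovery ...
 */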

static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
{
        host->f_init = freq;

#ifdef CONFIG_MMC_DEBUG
        pr_info("%s: %s: trying to init card at %u Hz\n",
                mmc_hostname(host), __func__, host->f_init);
#endif
        mmc_power_up(host, host->ocr_avail);

        /*
         * Some eMMCs (with VCCQ always on) may not be reset after power up, so
         * do a hardware reset if possible.
         */
        mmc_hw_reset_for_init(host);

        /*
         * sdio_reset sends CMD52 to reset the card. Since we do not know
         * if the card is being re-initialized, just send it. CMD52
         * should be ignored by SD/eMMC cards.
         * Skip it if we already know that we do not support SDIO commands.
         */
        if (!(host->caps2 & MMC_CAP2_NO_SDIO))
                sdio_reset(host);

        mmc_go_idle(host);

        if (!(host->caps2 & MMC_CAP2_NO_SD))
                mmc_send_if_cond(host, host->ocr_avail);

        /* Order's important: probe SDIO, then SD, then MMC */
        if (!(host->caps2 & MMC_CAP2_NO_SDIO))
                if (!mmc_attach_sdio(host))
                        return 0;

        if (!(host->caps2 & MMC_CAP2_NO_SD))
                if (!mmc_attach_sd(host))
                        return 0;

        if (!(host->caps2 & MMC_CAP2_NO_MMC))
                if (!mmc_attach_mmc(host))
                        return 0;

        mmc_power_off(host);
        return -EIO;
}

int _mmc_detect_card_removed(struct mmc_host *host)
{
        int ret;

        if (!host->card || mmc_card_removed(host->card))
                return 1;

        ret = host->bus_ops->alive(host);

        /*
         * Card detect status and alive check may be out of sync if card is
         * removed slowly, when card detect switch changes while card/slot
         * pads are still contacted in hardware (refer to "SD Card Mechanical
         * Addendum, Appendix C: Card Detection Switch"). So reschedule a
         * detect work 200 ms later for this case.
         */
        if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
                mmc_detect_change(host, msecs_to_jiffies(200));
                pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
        }

        if (ret) {
                mmc_card_set_removed(host->card);
                pr_debug("%s: card remove detected\n", mmc_hostname(host));
        }

        return ret;
}

int mmc_detect_card_removed(struct mmc_host *host)
{
        struct mmc_card *card = host->card;
        int ret;

        WARN_ON(!host->claimed);

        if (!card)
                return 1;

        if (!mmc_card_is_removable(host))
                return 0;

        ret = mmc_card_removed(card);
        /*
         * The card will be considered unchanged unless we have been asked to
         * detect a change or host requires polling to provide card detection.
         */
        if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
                return ret;

        host->detect_change = 0;
        if (!ret) {
                ret = _mmc_detect_card_removed(host);
                if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
                        /*
                         * Schedule a detect work as soon as possible to let a
                         * rescan handle the card removal.
                         */
                        cancel_delayed_work(&host->detect);
                        _mmc_detect_change(host, 0, false);
                }
        }

        return ret;
}
EXPORT_SYMBOL(mmc_detect_card_removed);
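
/*
 * Illustrative usage sketch (an assumption): a block driver's request path
 * can bail out early when the card is gone; the host must be claimed, as
 * the WARN_ON above enforces.
 *
 *        mmc_claim_host(host);
 *        if (mmc_detect_card_removed(host))
 *                ... fail pending requests with -ENOMEDIUM ...
 *        mmc_release_host(host);
 */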

void mmc_rescan(struct work_struct *work)
{
        struct mmc_host *host =
                container_of(work, struct mmc_host, detect.work);
        int i;

        if (host->rescan_disable)
                return;

        /* If there is a non-removable card registered, only scan once */
        if (!mmc_card_is_removable(host) && host->rescan_entered)
                return;
        host->rescan_entered = 1;

        if (host->trigger_card_event && host->ops->card_event) {
                mmc_claim_host(host);
                host->ops->card_event(host);
                mmc_release_host(host);
                host->trigger_card_event = false;
        }

        mmc_bus_get(host);

        /*
         * If there is a _removable_ card registered, check whether it is
         * still present.
         */
        if (host->bus_ops && !host->bus_dead && mmc_card_is_removable(host))
                host->bus_ops->detect(host);

        host->detect_change = 0;

        /*
         * Let mmc_bus_put() free the bus/bus_ops if we've found that
         * the card is no longer present.
         */
        mmc_bus_put(host);
        mmc_bus_get(host);

        /* If there still is a card present, stop here */
        if (host->bus_ops != NULL) {
                mmc_bus_put(host);
                goto out;
        }

        /*
         * Only we can add a new handler, so it's safe to
         * release the lock here.
         */
        mmc_bus_put(host);

        mmc_claim_host(host);
        if (mmc_card_is_removable(host) && host->ops->get_cd &&
            host->ops->get_cd(host) == 0) {
                mmc_power_off(host);
                mmc_release_host(host);
                goto out;
        }

        for (i = 0; i < ARRAY_SIZE(freqs); i++) {
                if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
                        break;
                if (freqs[i] <= host->f_min)
                        break;
        }
        mmc_release_host(host);

 out:
        if (host->caps & MMC_CAP_NEEDS_POLL)
                mmc_schedule_delayed_work(&host->detect, HZ);
}

void mmc_start_host(struct mmc_host *host)
{
        host->f_init = max(freqs[0], host->f_min);
        host->rescan_disable = 0;
        host->ios.power_mode = MMC_POWER_UNDEFINED;

        if (!(host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)) {
                mmc_claim_host(host);
                mmc_power_up(host, host->ocr_avail);
                mmc_release_host(host);
        }

        mmc_gpiod_request_cd_irq(host);
        _mmc_detect_change(host, 0, false);
}

void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
        unsigned long flags;
        spin_lock_irqsave(&host->lock, flags);
        host->removed = 1;
        spin_unlock_irqrestore(&host->lock, flags);
#endif
        if (host->slot.cd_irq >= 0)
                disable_irq(host->slot.cd_irq);

        host->rescan_disable = 1;
        cancel_delayed_work_sync(&host->detect);

        /* Clear pm flags now and let card drivers set them as needed */
        host->pm_flags = 0;

        mmc_bus_get(host);
        if (host->bus_ops && !host->bus_dead) {
                /* Calling bus_ops->remove() with a claimed host can deadlock */
                host->bus_ops->remove(host);
                mmc_claim_host(host);
                mmc_detach_bus(host);
                mmc_power_off(host);
                mmc_release_host(host);
                mmc_bus_put(host);
                return;
        }
        mmc_bus_put(host);

        mmc_claim_host(host);
        mmc_power_off(host);
        mmc_release_host(host);
}

int mmc_power_save_host(struct mmc_host *host)
{
        int ret = 0;

#ifdef CONFIG_MMC_DEBUG
        pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
#endif

        mmc_bus_get(host);

        if (!host->bus_ops || host->bus_dead) {
                mmc_bus_put(host);
                return -EINVAL;
        }

        if (host->bus_ops->power_save)
                ret = host->bus_ops->power_save(host);

        mmc_bus_put(host);

        mmc_power_off(host);

        return ret;
}
EXPORT_SYMBOL(mmc_power_save_host);

int mmc_power_restore_host(struct mmc_host *host)
{
        int ret;

#ifdef CONFIG_MMC_DEBUG
        pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
#endif

        mmc_bus_get(host);

        if (!host->bus_ops || host->bus_dead) {
                mmc_bus_put(host);
                return -EINVAL;
        }

        mmc_power_up(host, host->card->ocr);
        ret = host->bus_ops->power_restore(host);

        mmc_bus_put(host);

        return ret;
}
EXPORT_SYMBOL(mmc_power_restore_host);

/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
        int err = 0;

        if (mmc_card_mmc(card) &&
            (card->ext_csd.cache_size > 0) &&
            (card->ext_csd.cache_ctrl & 1)) {
                err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                 EXT_CSD_FLUSH_CACHE, 1, 0);
                if (err)
                        pr_err("%s: cache flush error %d\n",
                               mmc_hostname(card->host), err);
        }

        return err;
}
EXPORT_SYMBOL(mmc_flush_cache);
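
/*
 * Illustrative usage sketch (an assumption): callers flush the card's
 * volatile cache before power may be lost, e.g. in a suspend or shutdown
 * path.
 *
 *        err = mmc_flush_cache(card);
 *        if (err)
 *                ... log or abort, data in the card cache may be lost ...
 */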

#ifdef CONFIG_PM_SLEEP
/*
 * Do the card removal on suspend if the card is assumed removable.
 * Do that in the pm notifier while userspace isn't yet frozen, so we
 * will be able to sync the card.
 */
static int mmc_pm_notify(struct notifier_block *notify_block,
                         unsigned long mode, void *unused)
{
        struct mmc_host *host = container_of(
                notify_block, struct mmc_host, pm_notify);
        unsigned long flags;
        int err = 0;

        switch (mode) {
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:
        case PM_RESTORE_PREPARE:
                spin_lock_irqsave(&host->lock, flags);
                host->rescan_disable = 1;
                spin_unlock_irqrestore(&host->lock, flags);
                cancel_delayed_work_sync(&host->detect);

                if (!host->bus_ops)
                        break;

                /* Validate prerequisites for suspend */
                if (host->bus_ops->pre_suspend)
                        err = host->bus_ops->pre_suspend(host);
                if (!err)
                        break;

                /* Calling bus_ops->remove() with a claimed host can deadlock */
                host->bus_ops->remove(host);
                mmc_claim_host(host);
                mmc_detach_bus(host);
                mmc_power_off(host);
                mmc_release_host(host);
                host->pm_flags = 0;
                break;

        case PM_POST_SUSPEND:
        case PM_POST_HIBERNATION:
        case PM_POST_RESTORE:
                spin_lock_irqsave(&host->lock, flags);
                host->rescan_disable = 0;
                spin_unlock_irqrestore(&host->lock, flags);
                _mmc_detect_change(host, 0, false);
                break;
        }

        return 0;
}

void mmc_register_pm_notifier(struct mmc_host *host)
{
        host->pm_notify.notifier_call = mmc_pm_notify;
        register_pm_notifier(&host->pm_notify);
}

void mmc_unregister_pm_notifier(struct mmc_host *host)
{
        unregister_pm_notifier(&host->pm_notify);
}
#endif

/**
 * mmc_init_context_info() - init synchronization context
 * @host: mmc host
 *
 * Initialize the struct context_info needed to implement the
 * asynchronous request mechanism, used by the mmc core, host drivers
 * and mmc request suppliers.
 */
void mmc_init_context_info(struct mmc_host *host)
{
        host->context_info.is_new_req = false;
        host->context_info.is_done_rcv = false;
        host->context_info.is_waiting_last_req = false;
        init_waitqueue_head(&host->context_info.wait);
}

static int __init mmc_init(void)
{
        int ret;

        ret = mmc_register_bus();
        if (ret)
                return ret;

        ret = mmc_register_host_class();
        if (ret)
                goto unregister_bus;

        ret = sdio_register_bus();
        if (ret)
                goto unregister_host_class;

        return 0;

unregister_host_class:
        mmc_unregister_host_class();
unregister_bus:
        mmc_unregister_bus();
        return ret;
}

static void __exit mmc_exit(void)
{
        sdio_unregister_bus();
        mmc_unregister_host_class();
        mmc_unregister_bus();
}

subsys_initcall(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");