/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 *          28 May 2002
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/pm_runtime.h>
#include <linux/idr.h>
#include <linux/debugfs.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <linux/uaccess.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"
#include "host.h"
#include "bus.h"
#include "mmc_ops.h"
#include "quirks.h"
#include "sd_ops.h"

MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."

#define MMC_BLK_TIMEOUT_MS  (10 * 60 * 1000)        /* 10 minute timeout */
#define MMC_SANITIZE_REQ_TIMEOUT 240000
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
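
/*
 * A SWITCH (CMD6) argument packs [25:24] access mode, [23:16] the EXT_CSD
 * byte index, [15:8] the value and [7:0] the command set, so the macro
 * above recovers the byte index.  As a worked example (values filled in
 * by hand), the write-byte SWITCH arg 0x03A50100 targets EXT_CSD byte
 * 0xA5 (165, EXT_CSD_SANITIZE_START):
 *
 *	(0x03A50100 & 0x00FF0000) >> 16 == 0xA5
 *
 * This is how __mmc_blk_ioctl_cmd() below spots a user-issued sanitize.
 */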

#define mmc_req_rel_wr(req)	((req->cmd_flags & REQ_FUA) && \
				 (rq_data_dir(req) == WRITE))
static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
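
/*
 * With MODULE_PARAM_PREFIX set to "mmcblk." above, this can be overridden
 * from the kernel command line, e.g. (a hypothetical value):
 *
 *	mmcblk.perdev_minors=16
 *
 * which leaves room for 15 partitions per card instead of the default of
 * CONFIG_MMC_BLOCK_MINORS - 1.
 */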

/*
 * We've only got one major, so the number of mmcblk devices is
 * limited to (1 << 20) / number of minors per device.  It is also
 * limited by MAX_DEVICES below.
 */
static int max_devices;

#define MAX_DEVICES 256

static DEFINE_IDA(mmc_blk_ida);

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	spinlock_t	lock;
	struct device	*parent;
	struct gendisk	*disk;
	struct mmc_queue queue;
	struct list_head part;

	unsigned int	flags;
#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */

	unsigned int	usage;
	unsigned int	read_only;
	unsigned int	part_type;
	unsigned int	reset_done;
#define MMC_BLK_READ		BIT(0)
#define MMC_BLK_WRITE		BIT(1)
#define MMC_BLK_DISCARD		BIT(2)
#define MMC_BLK_SECDISCARD	BIT(3)

	/*
	 * Only set in the main mmc_blk_data associated with the mmc_card
	 * via dev_set_drvdata; keeps track of the currently selected
	 * device partition.
	 */
	unsigned int	part_curr;
	struct device_attribute force_ro;
	struct device_attribute power_ro_lock;
	int	area_type;
};

static DEFINE_MUTEX(open_lock);

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Number of minors to allocate per device");

static inline int mmc_blk_part_switch(struct mmc_card *card,
				      unsigned int part_type);

static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
	struct mmc_blk_data *md;

	mutex_lock(&open_lock);
	md = disk->private_data;
	if (md && md->usage == 0)
		md = NULL;
	if (md)
		md->usage++;
	mutex_unlock(&open_lock);

	return md;
}

static inline int mmc_get_devidx(struct gendisk *disk)
{
	int devidx = disk->first_minor / perdev_minors;
	return devidx;
}

static void mmc_blk_put(struct mmc_blk_data *md)
{
	mutex_lock(&open_lock);
	md->usage--;
	if (md->usage == 0) {
		int devidx = mmc_get_devidx(md->disk);
		blk_cleanup_queue(md->queue.queue);
		ida_simple_remove(&mmc_blk_ida, devidx);
		put_disk(md->disk);
		kfree(md);
	}
	mutex_unlock(&open_lock);
}

static ssize_t power_ro_lock_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	struct mmc_card *card = md->queue.card;
	int locked = 0;

	if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
		locked = 2;
	else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
		locked = 1;

	ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);

	mmc_blk_put(md);

	return ret;
}

static ssize_t power_ro_lock_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	int ret;
	struct mmc_blk_data *md, *part_md;
	struct mmc_queue *mq;
	struct request *req;
	unsigned long set;

	if (kstrtoul(buf, 0, &set))
		return -EINVAL;

	if (set != 1)
		return count;

	md = mmc_blk_get(dev_to_disk(dev));
	mq = &md->queue;

	/* Dispatch locking to the block layer */
	req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, __GFP_RECLAIM);
	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
	blk_execute_rq(mq->queue, NULL, req, 0);
	ret = req_to_mmc_queue_req(req)->drv_op_result;

	if (!ret) {
		pr_info("%s: Locking boot partition ro until next power on\n",
			md->disk->disk_name);
		set_disk_ro(md->disk, 1);

		list_for_each_entry(part_md, &md->part, part)
			if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
				pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
				set_disk_ro(part_md->disk, 1);
			}
	}

	mmc_blk_put(md);
	return count;
}

static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

	ret = snprintf(buf, PAGE_SIZE, "%d\n",
		       get_disk_ro(dev_to_disk(dev)) ^
		       md->read_only);
	mmc_blk_put(md);
	return ret;
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	int ret;
	char *end;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	unsigned long set = simple_strtoul(buf, &end, 0);
	if (end == buf) {
		ret = -EINVAL;
		goto out;
	}

	set_disk_ro(dev_to_disk(dev), set || md->read_only);
	ret = count;
out:
	mmc_blk_put(md);
	return ret;
}

static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
	int ret = -ENXIO;

	mutex_lock(&block_mutex);
	if (md) {
		if (md->usage == 2)
			check_disk_change(bdev);
		ret = 0;

		if ((mode & FMODE_WRITE) && md->read_only) {
			mmc_blk_put(md);
			ret = -EROFS;
		}
	}
	mutex_unlock(&block_mutex);

	return ret;
}

static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
	struct mmc_blk_data *md = disk->private_data;

	mutex_lock(&block_mutex);
	mmc_blk_put(md);
	mutex_unlock(&block_mutex);
}

static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
}
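
/*
 * The geometry is synthetic: 4 heads and 16 sectors per track, with all
 * remaining capacity folded into the cylinder count.  As a worked example
 * (hypothetical card), a 4 GiB device of 8388608 512-byte sectors reports
 * 8388608 / (4 * 16) = 131072 cylinders.
 */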

struct mmc_blk_ioc_data {
	struct mmc_ioc_cmd ic;
	unsigned char *buf;
	u64 buf_bytes;
};

static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
	struct mmc_ioc_cmd __user *user)
{
	struct mmc_blk_ioc_data *idata;
	int err;

	idata = kmalloc(sizeof(*idata), GFP_KERNEL);
	if (!idata) {
		err = -ENOMEM;
		goto out;
	}

	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
		err = -EFAULT;
		goto idata_err;
	}

	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
		err = -EOVERFLOW;
		goto idata_err;
	}

	if (!idata->buf_bytes) {
		idata->buf = NULL;
		return idata;
	}

	idata->buf = kmalloc(idata->buf_bytes, GFP_KERNEL);
	if (!idata->buf) {
		err = -ENOMEM;
		goto idata_err;
	}

	if (copy_from_user(idata->buf, (void __user *)(unsigned long)
			   idata->ic.data_ptr, idata->buf_bytes)) {
		err = -EFAULT;
		goto copy_err;
	}

	return idata;

copy_err:
	kfree(idata->buf);
idata_err:
	kfree(idata);
out:
	return ERR_PTR(err);
}

static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
				      struct mmc_blk_ioc_data *idata)
{
	struct mmc_ioc_cmd *ic = &idata->ic;

	if (copy_to_user(&(ic_ptr->response), ic->response,
			 sizeof(ic->response)))
		return -EFAULT;

	if (!idata->ic.write_flag) {
		if (copy_to_user((void __user *)(unsigned long)ic->data_ptr,
				 idata->buf, idata->buf_bytes))
			return -EFAULT;
	}

	return 0;
}

static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
				       u32 retries_max)
{
	int err;
	u32 retry_count = 0;

	if (!status || !retries_max)
		return -EINVAL;

	do {
		err = __mmc_send_status(card, status, 5);
		if (err)
			break;

		if (!R1_STATUS(*status) &&
		    (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
			break; /* RPMB programming operation complete */

		/*
		 * Reschedule to give the MMC device a chance to continue
		 * processing the previous command without being polled too
		 * frequently.
		 */
		usleep_range(1000, 5000);
	} while (++retry_count < retries_max);

	if (retry_count == retries_max)
		err = -EPERM;

	return err;
}

static int ioctl_do_sanitize(struct mmc_card *card)
{
	int err;

	if (!mmc_can_sanitize(card)) {
		pr_warn("%s: %s - SANITIZE is not supported\n",
			mmc_hostname(card->host), __func__);
		err = -EOPNOTSUPP;
		goto out;
	}

	pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
		 mmc_hostname(card->host), __func__);

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_SANITIZE_START, 1,
			 MMC_SANITIZE_REQ_TIMEOUT);

	if (err)
		pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
		       mmc_hostname(card->host), __func__, err);

	pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
		 __func__);
out:
	return err;
}

static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
			       struct mmc_blk_ioc_data *idata)
{
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct mmc_request mrq = {};
	struct scatterlist sg;
	int err;
	bool is_rpmb = false;
	u32 status = 0;

	if (!card || !md || !idata)
		return -EINVAL;

	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
		is_rpmb = true;

	cmd.opcode = idata->ic.opcode;
	cmd.arg = idata->ic.arg;
	cmd.flags = idata->ic.flags;

	if (idata->buf_bytes) {
		data.sg = &sg;
		data.sg_len = 1;
		data.blksz = idata->ic.blksz;
		data.blocks = idata->ic.blocks;

		sg_init_one(data.sg, idata->buf, idata->buf_bytes);

		if (idata->ic.write_flag)
			data.flags = MMC_DATA_WRITE;
		else
			data.flags = MMC_DATA_READ;

		/* data.flags must already be set before doing this. */
		mmc_set_data_timeout(&data, card);

		/* Allow overriding the timeout_ns for empirical tuning. */
		if (idata->ic.data_timeout_ns)
			data.timeout_ns = idata->ic.data_timeout_ns;

		if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
			/*
			 * Pretend this is a data transfer and rely on the
			 * host driver to compute timeout.  When all host
			 * drivers support cmd.cmd_timeout for R1B, this
			 * can be changed to:
			 *
			 *     mrq.data = NULL;
			 *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
			 */
			data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
		}

		mrq.data = &data;
	}

	mrq.cmd = &cmd;

	err = mmc_blk_part_switch(card, md->part_type);
	if (err)
		return err;

	if (idata->ic.is_acmd) {
		err = mmc_app_cmd(card->host, card);
		if (err)
			return err;
	}

	if (is_rpmb) {
		err = mmc_set_blockcount(card, data.blocks,
			idata->ic.write_flag & (1 << 31));
		if (err)
			return err;
	}

	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
	    (cmd.opcode == MMC_SWITCH)) {
		err = ioctl_do_sanitize(card);

		if (err)
			pr_err("%s: ioctl_do_sanitize() failed. err = %d",
			       __func__, err);

		return err;
	}

	mmc_wait_for_req(card->host, &mrq);

	if (cmd.error) {
		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
			__func__, cmd.error);
		return cmd.error;
	}
	if (data.error) {
		dev_err(mmc_dev(card->host), "%s: data error %d\n",
			__func__, data.error);
		return data.error;
	}

	/*
	 * According to the SD specs, some commands require a delay after
	 * issuing the command.
	 */
	if (idata->ic.postsleep_min_us)
		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);

	memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));

	if (is_rpmb) {
		/*
		 * Ensure RPMB command has completed by polling CMD13
		 * "Send Status".
		 */
		err = ioctl_rpmb_card_status_poll(card, &status, 5);
		if (err)
			dev_err(mmc_dev(card->host),
				"%s: Card Status=0x%08X, error %d\n",
				__func__, status, err);
	}

	return err;
}

static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
			     struct mmc_ioc_cmd __user *ic_ptr)
{
	struct mmc_blk_ioc_data *idata;
	struct mmc_blk_ioc_data *idatas[1];
	struct mmc_queue *mq;
	struct mmc_card *card;
	int err = 0, ioc_err = 0;
	struct request *req;

	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
	if (IS_ERR(idata))
		return PTR_ERR(idata);

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}

	/*
	 * Dispatch the ioctl() into the block request queue.
	 */
	mq = &md->queue;
	req = blk_get_request(mq->queue,
		idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
		__GFP_RECLAIM);
	idatas[0] = idata;
	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL;
	req_to_mmc_queue_req(req)->drv_op_data = idatas;
	req_to_mmc_queue_req(req)->ioc_count = 1;
	blk_execute_rq(mq->queue, NULL, req, 0);
	ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
	err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
	blk_put_request(req);

cmd_done:
	kfree(idata->buf);
	kfree(idata);
	return ioc_err ? ioc_err : err;
}
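
/*
 * A minimal userspace sketch (hypothetical, not part of this driver) of
 * driving the MMC_IOC_CMD path above: reading the 512-byte EXT_CSD with
 * MMC_SEND_EXT_CSD (CMD8).  The response-flag constants mirror the
 * kernel's; userspace tools such as mmc-utils carry their own copies.
 * Error handling is omitted for brevity.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/mmc/ioctl.h>
 *
 *	int read_ext_csd(const char *dev, __u8 ext_csd[512])
 *	{
 *		struct mmc_ioc_cmd ic;
 *		int fd = open(dev, O_RDWR);	// whole device, e.g. /dev/mmcblk0
 *		int ret;
 *
 *		memset(&ic, 0, sizeof(ic));
 *		ic.opcode = 8;			// MMC_SEND_EXT_CSD
 *		ic.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
 *		ic.blksz = 512;
 *		ic.blocks = 1;
 *		ic.write_flag = 0;		// a read
 *		mmc_ioc_cmd_set_data(ic, ext_csd);
 *		ret = ioctl(fd, MMC_IOC_CMD, &ic);
 *		close(fd);
 *		return ret;
 *	}
 *
 * The caller needs CAP_SYS_RAWIO and must open the whole block device,
 * not a partition (see mmc_blk_check_blkdev() below).
 */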

static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
				   struct mmc_ioc_multi_cmd __user *user)
{
	struct mmc_blk_ioc_data **idata = NULL;
	struct mmc_ioc_cmd __user *cmds = user->cmds;
	struct mmc_card *card;
	struct mmc_queue *mq;
	int i, err = 0, ioc_err = 0;
	__u64 num_of_cmds;
	struct request *req;

	if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
			   sizeof(num_of_cmds)))
		return -EFAULT;

	if (!num_of_cmds)
		return 0;

	if (num_of_cmds > MMC_IOC_MAX_CMDS)
		return -EINVAL;

	idata = kcalloc(num_of_cmds, sizeof(*idata), GFP_KERNEL);
	if (!idata)
		return -ENOMEM;

	for (i = 0; i < num_of_cmds; i++) {
		idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]);
		if (IS_ERR(idata[i])) {
			err = PTR_ERR(idata[i]);
			num_of_cmds = i;
			goto cmd_err;
		}
	}

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_err;
	}

	/*
	 * Dispatch the ioctl()s into the block request queue.
	 */
	mq = &md->queue;
	req = blk_get_request(mq->queue,
		idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
		__GFP_RECLAIM);
	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL;
	req_to_mmc_queue_req(req)->drv_op_data = idata;
	req_to_mmc_queue_req(req)->ioc_count = num_of_cmds;
	blk_execute_rq(mq->queue, NULL, req, 0);
	ioc_err = req_to_mmc_queue_req(req)->drv_op_result;

	/* copy to user if data and response */
	for (i = 0; i < num_of_cmds && !err; i++)
		err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);

	blk_put_request(req);

cmd_err:
	for (i = 0; i < num_of_cmds; i++) {
		kfree(idata[i]->buf);
		kfree(idata[i]);
	}
	kfree(idata);
	return ioc_err ? ioc_err : err;
}
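
/*
 * Hypothetical userspace sketch of the multi-command path: the batch is a
 * flexible-array struct, so it is sized and allocated in one go, and the
 * whole batch is executed as a single block-layer request.
 *
 *	#include <stdlib.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/mmc/ioctl.h>
 *
 *	int send_two_cmds(int fd, struct mmc_ioc_cmd *c0, struct mmc_ioc_cmd *c1)
 *	{
 *		struct mmc_ioc_multi_cmd *mc;
 *		int ret;
 *
 *		mc = calloc(1, sizeof(*mc) + 2 * sizeof(struct mmc_ioc_cmd));
 *		mc->num_of_cmds = 2;
 *		mc->cmds[0] = *c0;
 *		mc->cmds[1] = *c1;
 *		ret = ioctl(fd, MMC_IOC_MULTI_CMD, mc);
 *		free(mc);
 *		return ret;
 *	}
 */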

static int mmc_blk_check_blkdev(struct block_device *bdev)
{
	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition.  This prevents overspray
	 * between sibling partitions.
	 */
	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
		return -EPERM;
	return 0;
}

static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
			 unsigned int cmd, unsigned long arg)
{
	struct mmc_blk_data *md;
	int ret;

	switch (cmd) {
	case MMC_IOC_CMD:
		ret = mmc_blk_check_blkdev(bdev);
		if (ret)
			return ret;
		md = mmc_blk_get(bdev->bd_disk);
		if (!md)
			return -EINVAL;
		ret = mmc_blk_ioctl_cmd(md,
					(struct mmc_ioc_cmd __user *)arg);
		mmc_blk_put(md);
		return ret;
	case MMC_IOC_MULTI_CMD:
		ret = mmc_blk_check_blkdev(bdev);
		if (ret)
			return ret;
		md = mmc_blk_get(bdev->bd_disk);
		if (!md)
			return -EINVAL;
		ret = mmc_blk_ioctl_multi_cmd(md,
					(struct mmc_ioc_multi_cmd __user *)arg);
		mmc_blk_put(md);
		return ret;
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
#endif

static const struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
	.ioctl			= mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= mmc_blk_compat_ioctl,
#endif
};

static int mmc_blk_part_switch_pre(struct mmc_card *card,
				   unsigned int part_type)
{
	int ret = 0;

	if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
		if (card->ext_csd.cmdq_en) {
			ret = mmc_cmdq_disable(card);
			if (ret)
				return ret;
		}
		mmc_retune_pause(card->host);
	}

	return ret;
}

static int mmc_blk_part_switch_post(struct mmc_card *card,
				    unsigned int part_type)
{
	int ret = 0;

	if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
		mmc_retune_unpause(card->host);
		if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
			ret = mmc_cmdq_enable(card);
	}

	return ret;
}

static inline int mmc_blk_part_switch(struct mmc_card *card,
				      unsigned int part_type)
{
	int ret = 0;
	struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);

	if (main_md->part_curr == part_type)
		return 0;

	if (mmc_card_mmc(card)) {
		u8 part_config = card->ext_csd.part_config;

		ret = mmc_blk_part_switch_pre(card, part_type);
		if (ret)
			return ret;

		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		part_config |= part_type;

		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONFIG, part_config,
				 card->ext_csd.part_time);
		if (ret) {
			mmc_blk_part_switch_post(card, part_type);
			return ret;
		}

		card->ext_csd.part_config = part_config;

		ret = mmc_blk_part_switch_post(card, main_md->part_curr);
	}

	main_md->part_curr = part_type;
	return ret;
}
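
/*
 * The switch rewrites the PARTITION_ACCESS bits [2:0] of EXT_CSD byte 179
 * (PARTITION_CONFIG).  As a worked example (with a hypothetical starting
 * value of 0x48 for the other bits), moving from the user area (0x0) to
 * RPMB (EXT_CSD_PART_CONFIG_ACC_RPMB, 0x3):
 *
 *	(0x48 & ~EXT_CSD_PART_CONFIG_ACC_MASK) | 0x3 == 0x4B
 *
 * The cached part_config is only updated on success, so part_curr stays
 * in sync with what the card actually selected.
 */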

static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
{
	int err;
	u32 result;
	__be32 *blocks;

	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};

	struct scatterlist sg;

	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		return err;
	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
		return -EIO;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);

	mrq.cmd = &cmd;
	mrq.data = &data;

	blocks = kmalloc(4, GFP_KERNEL);
	if (!blocks)
		return -ENOMEM;

	sg_init_one(&sg, blocks, 4);

	mmc_wait_for_req(card->host, &mrq);

	result = ntohl(*blocks);
	kfree(blocks);

	if (cmd.error || data.error)
		return -EIO;

	*written_blocks = result;

	return 0;
}
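
/*
 * The sequence above is CMD55 (APP_CMD, with the RCA in arg bits [31:16])
 * followed by ACMD22 (SEND_NUM_WR_BLKS), which returns the count of
 * well-written blocks as a 4-byte big-endian data block rather than in
 * the response.  For example, if the card committed 24 blocks before an
 * error, the data phase delivers 0x00000018 and ntohl() yields 24, which
 * mmc_blk_rw_cmd_err() below uses to complete the known-good part of the
 * request.
 */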

static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
			    bool hw_busy_detect, struct request *req, bool *gen_err)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
	int err = 0;
	u32 status;

	do {
		err = __mmc_send_status(card, &status, 5);
		if (err) {
			pr_err("%s: error %d requesting status\n",
			       req->rq_disk->disk_name, err);
			return err;
		}

		if (status & R1_ERROR) {
			pr_err("%s: %s: error sending status cmd, status %#x\n",
			       req->rq_disk->disk_name, __func__, status);
			*gen_err = true;
		}

		/* We may rely on the host hw to handle busy detection. */
		if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) &&
		    hw_busy_detect)
			break;

		/*
		 * Timeout if the device never becomes ready for data and never
		 * leaves the program state.
		 */
		if (time_after(jiffies, timeout)) {
			pr_err("%s: Card stuck in programming state! %s %s\n",
			       mmc_hostname(card->host),
			       req->rq_disk->disk_name, __func__);
			return -ETIMEDOUT;
		}

		/*
		 * Some cards mishandle the status bits,
		 * so make sure to check both the busy
		 * indication and the card state.
		 */
	} while (!(status & R1_READY_FOR_DATA) ||
		 (R1_CURRENT_STATE(status) == R1_STATE_PRG));

	return err;
}

static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
		     struct request *req, bool *gen_err, u32 *stop_status)
{
	struct mmc_host *host = card->host;
	struct mmc_command cmd = {};
	int err;
	bool use_r1b_resp = rq_data_dir(req) == WRITE;

	/*
	 * Normally we use R1B responses for WRITE, but in cases where the
	 * host has specified a max_busy_timeout we need to validate it. A
	 * failure means we need to prevent the host from doing hw busy
	 * detection, which is done by converting to an R1 response instead.
	 */
	if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	if (use_r1b_resp) {
		cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	}

	err = mmc_wait_for_cmd(host, &cmd, 5);
	if (err)
		return err;

	*stop_status = cmd.resp[0];

	/* No need to check card status in case of READ. */
	if (rq_data_dir(req) == READ)
		return 0;

	if (!mmc_host_is_spi(host) &&
	    (*stop_status & R1_ERROR)) {
		pr_err("%s: %s: general error sending stop command, resp %#x\n",
		       req->rq_disk->disk_name, __func__, *stop_status);
		*gen_err = true;
	}

	return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
}

#define ERR_NOMEDIUM	3
#define ERR_RETRY	2
#define ERR_ABORT	1
#define ERR_CONTINUE	0

static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
	bool status_valid, u32 status)
{
	switch (error) {
	case -EILSEQ:
		/* response crc error, retry the r/w cmd */
		pr_err("%s: %s sending %s command, card status %#x\n",
		       req->rq_disk->disk_name, "response CRC error",
		       name, status);
		return ERR_RETRY;

	case -ETIMEDOUT:
		pr_err("%s: %s sending %s command, card status %#x\n",
		       req->rq_disk->disk_name, "timed out", name, status);

		/* If the status cmd initially failed, retry the r/w cmd */
		if (!status_valid) {
			pr_err("%s: status not valid, retrying timeout\n",
			       req->rq_disk->disk_name);
			return ERR_RETRY;
		}

		/*
		 * If it was a r/w cmd crc error, or illegal command
		 * (eg, issued in wrong state) then retry - we should
		 * have corrected the state problem above.
		 */
		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
			pr_err("%s: command error, retrying timeout\n",
			       req->rq_disk->disk_name);
			return ERR_RETRY;
		}

		/* Otherwise abort the command */
		return ERR_ABORT;

	default:
		/* We don't understand the error code the driver gave us */
		pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
		       req->rq_disk->disk_name, error, status);
		return ERR_ABORT;
	}
}

/*
 * Initial r/w and stop cmd error recovery.
 * We don't know whether the card received the r/w cmd or not, so try to
 * restore things back to a sane state.  Essentially, we do this as follows:
 * - Obtain card status.  If the first attempt to obtain card status fails,
 *   the status word will reflect the failed status cmd, not the failed
 *   r/w cmd.  If we fail to obtain card status, it suggests we can no
 *   longer communicate with the card.
 * - Check the card state.  If the card received the cmd but there was a
 *   transient problem with the response, it might still be in a data transfer
 *   mode.  Try to send it a stop command.  If this fails, we can't recover.
 * - If the r/w cmd failed due to a response CRC error, it was probably
 *   transient, so retry the cmd.
 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
 *   illegal cmd, retry.
 * Otherwise we don't understand what happened, so abort.
 */
static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
	struct mmc_blk_request *brq, bool *ecc_err, bool *gen_err)
{
	bool prev_cmd_status_valid = true;
	u32 status, stop_status = 0;
	int err, retry;

	if (mmc_card_removed(card))
		return ERR_NOMEDIUM;

	/*
	 * Try to get card status which indicates both the card state
	 * and why there was no response.  If the first attempt fails,
	 * we can't be sure the returned status is for the r/w command.
	 */
	for (retry = 2; retry >= 0; retry--) {
		err = __mmc_send_status(card, &status, 0);
		if (!err)
			break;

		/* Re-tune if needed */
		mmc_retune_recheck(card->host);

		prev_cmd_status_valid = false;
		pr_err("%s: error %d sending status command, %sing\n",
		       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
	}

	/* We couldn't get a response from the card.  Give up. */
	if (err) {
		/* Check if the card is removed */
		if (mmc_detect_card_removed(card->host))
			return ERR_NOMEDIUM;
		return ERR_ABORT;
	}

	/* Flag ECC errors */
	if ((status & R1_CARD_ECC_FAILED) ||
	    (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
	    (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
		*ecc_err = true;

	/* Flag General errors */
	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
		if ((status & R1_ERROR) ||
		    (brq->stop.resp[0] & R1_ERROR)) {
			pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
			       req->rq_disk->disk_name, __func__,
			       brq->stop.resp[0], status);
			*gen_err = true;
		}

	/*
	 * Check the current card state.  If it is in some data transfer
	 * mode, tell it to stop (and hopefully transition back to TRAN.)
	 */
	if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
	    R1_CURRENT_STATE(status) == R1_STATE_RCV) {
		err = send_stop(card,
			DIV_ROUND_UP(brq->data.timeout_ns, 1000000),
			req, gen_err, &stop_status);
		if (err) {
			pr_err("%s: error %d sending stop command\n",
			       req->rq_disk->disk_name, err);
			/*
			 * If the stop cmd also timed out, the card is probably
			 * not present, so abort.  Other errors are bad news too.
			 */
			return ERR_ABORT;
		}

		if (stop_status & R1_CARD_ECC_FAILED)
			*ecc_err = true;
	}

	/* Check for set block count errors */
	if (brq->sbc.error)
		return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
				prev_cmd_status_valid, status);

	/* Check for r/w command errors */
	if (brq->cmd.error)
		return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
				prev_cmd_status_valid, status);

	/* Data errors */
	if (!brq->stop.error)
		return ERR_CONTINUE;

	/* Now for stop errors.  These aren't fatal to the transfer. */
	pr_info("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
		req->rq_disk->disk_name, brq->stop.error,
		brq->cmd.resp[0], status);

	/*
	 * Substitute our own stop status, as this will give the error
	 * state which happened during the execution of the r/w command.
	 */
	if (stop_status) {
		brq->stop.resp[0] = stop_status;
		brq->stop.error = 0;
	}
	return ERR_CONTINUE;
}

static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
			 int type)
{
	int err;

	if (md->reset_done & type)
		return -EEXIST;

	md->reset_done |= type;
	err = mmc_hw_reset(host);
	/* Ensure we switch back to the correct partition */
	if (err != -EOPNOTSUPP) {
		struct mmc_blk_data *main_md =
			dev_get_drvdata(&host->card->dev);
		int part_err;

		main_md->part_curr = main_md->part_type;
		part_err = mmc_blk_part_switch(host->card, md->part_type);
		if (part_err) {
			/*
			 * We have failed to get back into the correct
			 * partition, so we need to abort the whole request.
			 */
			return -ENODEV;
		}
	}
	return err;
}

static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
{
	md->reset_done &= ~type;
}

int mmc_access_rpmb(struct mmc_queue *mq)
{
	struct mmc_blk_data *md = mq->blkdata;
	/*
	 * If this is an RPMB partition access, return true
	 */
	if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
		return true;

	return false;
}

/*
 * Non-block commands are queued by the block layer and processed along
 * with all other requests; once scheduled, they get issued in this
 * function.
 */
static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
{
	struct mmc_queue_req *mq_rq;
	struct mmc_card *card = mq->card;
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
	struct mmc_blk_ioc_data **idata;
	u8 **ext_csd;
	u32 status;
	int ret;
	int i;

	mq_rq = req_to_mmc_queue_req(req);

	switch (mq_rq->drv_op) {
	case MMC_DRV_OP_IOCTL:
		idata = mq_rq->drv_op_data;
		for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
			ret = __mmc_blk_ioctl_cmd(card, md, idata[i]);
			if (ret)
				break;
		}
		/* Always switch back to main area after RPMB access */
		if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
			mmc_blk_part_switch(card, main_md->part_type);
		break;
	case MMC_DRV_OP_BOOT_WP:
		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
				 card->ext_csd.boot_ro_lock |
				 EXT_CSD_BOOT_WP_B_PWR_WP_EN,
				 card->ext_csd.part_time);
		if (ret)
			pr_err("%s: Locking boot partition ro until next power on failed: %d\n",
			       md->disk->disk_name, ret);
		else
			card->ext_csd.boot_ro_lock |=
				EXT_CSD_BOOT_WP_B_PWR_WP_EN;
		break;
	case MMC_DRV_OP_GET_CARD_STATUS:
		ret = mmc_send_status(card, &status);
		if (!ret)
			ret = status;
		break;
	case MMC_DRV_OP_GET_EXT_CSD:
		ext_csd = mq_rq->drv_op_data;
		ret = mmc_get_ext_csd(card, ext_csd);
		break;
	default:
		pr_err("%s: unknown driver specific operation\n",
		       md->disk->disk_name);
		ret = -EINVAL;
		break;
	}
	mq_rq->drv_op_result = ret;
	blk_end_request_all(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}

static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0, type = MMC_BLK_DISCARD;
	blk_status_t status = BLK_STS_OK;

	if (!mmc_can_erase(card)) {
		status = BLK_STS_NOTSUPP;
		goto fail;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_discard(card))
		arg = MMC_DISCARD_ARG;
	else if (mmc_can_trim(card))
		arg = MMC_TRIM_ARG;
	else
		arg = MMC_ERASE_ARG;
	do {
		err = 0;
		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 INAND_CMD38_ARG_EXT_CSD,
					 arg == MMC_TRIM_ARG ?
					 INAND_CMD38_ARG_TRIM :
					 INAND_CMD38_ARG_ERASE,
					 0);
		}
		if (!err)
			err = mmc_erase(card, from, nr, arg);
	} while (err == -EIO && !mmc_blk_reset(md, card->host, type));
	if (err)
		status = BLK_STS_IOERR;
	else
		mmc_blk_reset_success(md, type);
fail:
	blk_end_request(req, status, blk_rq_bytes(req));
}

static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
				       struct request *req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0, type = MMC_BLK_SECDISCARD;
	blk_status_t status = BLK_STS_OK;

	if (!(mmc_can_secure_erase_trim(card))) {
		status = BLK_STS_NOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
		arg = MMC_SECURE_TRIM1_ARG;
	else
		arg = MMC_SECURE_ERASE_ARG;

retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_SECURE_TRIM1_ARG ?
				 INAND_CMD38_ARG_SECTRIM1 :
				 INAND_CMD38_ARG_SECERASE,
				 0);
		if (err)
			goto out_retry;
	}

	err = mmc_erase(card, from, nr, arg);
	if (err == -EIO)
		goto out_retry;
	if (err) {
		status = BLK_STS_IOERR;
		goto out;
	}

	if (arg == MMC_SECURE_TRIM1_ARG) {
		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 INAND_CMD38_ARG_EXT_CSD,
					 INAND_CMD38_ARG_SECTRIM2,
					 0);
			if (err)
				goto out_retry;
		}

		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
		if (err == -EIO)
			goto out_retry;
		if (err) {
			status = BLK_STS_IOERR;
			goto out;
		}
	}

out_retry:
	if (err && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
out:
	blk_end_request(req, status, blk_rq_bytes(req));
}

static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	int ret = 0;

	ret = mmc_flush_cache(card);
	blk_end_request_all(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}

/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finishing the request in
 * partial completions.
 */
static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
				    struct mmc_card *card,
				    struct request *req)
{
	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
		/* Legacy mode imposes restrictions on transfers. */
		if (!IS_ALIGNED(blk_rq_pos(req), card->ext_csd.rel_sectors))
			brq->data.blocks = 1;

		if (brq->data.blocks > card->ext_csd.rel_sectors)
			brq->data.blocks = card->ext_csd.rel_sectors;
		else if (brq->data.blocks < card->ext_csd.rel_sectors)
			brq->data.blocks = 1;
	}
}
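
/*
 * A worked example of the legacy clamping above (hypothetical numbers):
 * with rel_sectors == 8, an aligned 13-block reliable write is first cut
 * to 8 blocks; the 5-block remainder is below rel_sectors, so each of the
 * remaining sectors goes out as a single-block transfer.  An unaligned
 * request is single-block from the start.
 */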

#define CMD_ERRORS							\
	(R1_OUT_OF_RANGE |	/* Command argument out of range */	\
	 R1_ADDRESS_ERROR |	/* Misaligned address */		\
	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
	 R1_CARD_ECC_FAILED |	/* Card ECC failed */			\
	 R1_CC_ERROR |		/* Card controller error */		\
	 R1_ERROR)		/* General/unknown error */

static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
{
	u32 val;

	/*
	 * Per the SD specification (physical layer version 4.10) [1],
	 * section 4.3.3, it explicitly states that "When the last
	 * block of user area is read using CMD18, the host should
	 * ignore OUT_OF_RANGE error that may occur even the sequence
	 * is correct".  JESD84-B51 for eMMC makes a similar statement
	 * in section 6.8.3.
	 *
	 * A multiple block read/write can be done either by the predefined
	 * method, namely CMD23, or in open-ended mode.  For open-ended mode,
	 * we should ignore the OUT_OF_RANGE error as it's normal behaviour.
	 *
	 * However the spec [1] doesn't tell us whether we should also
	 * ignore that for the predefined method.  But per the spec [1],
	 * section 4.15 Set Block Count Command, it says "If illegal block
	 * count is set, out of range error will be indicated during
	 * read/write operation (For example, data transfer is stopped at
	 * user area boundary)."  In other words, we can expect an out of
	 * range error in the response to the following CMD18/25.  And if
	 * the argument of CMD23 plus the argument of CMD18/25 exceeds the
	 * max number of blocks, we can also expect -ETIMEDOUT or some other
	 * error from the host drivers due to a missing data response (for
	 * write) or data (for read), as the card will stop the data
	 * transfer by itself per the spec.  So we only need to check
	 * R1_OUT_OF_RANGE for open-ended mode.
	 */

	if (!brq->stop.error) {
		bool oor_with_open_end;
		/* If there is no error yet, check R1 response */

		val = brq->stop.resp[0] & CMD_ERRORS;
		oor_with_open_end = val & R1_OUT_OF_RANGE && !brq->mrq.sbc;

		if (val && !oor_with_open_end)
			brq->stop.error = -EIO;
	}
}

static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
					     struct mmc_async_req *areq)
{
	struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
						    areq);
	struct mmc_blk_request *brq = &mq_mrq->brq;
	struct request *req = mmc_queue_req_to_req(mq_mrq);
	int need_retune = card->host->need_retune;
	bool ecc_err = false;
	bool gen_err = false;

	/*
	 * sbc.error indicates a problem with the set block count
	 * command.  No data will have been transferred.
	 *
	 * cmd.error indicates a problem with the r/w command.  No
	 * data will have been transferred.
	 *
	 * stop.error indicates a problem with the stop command.  Data
	 * may have been transferred, or may still be transferring.
	 */

	mmc_blk_eval_resp_error(brq);

	if (brq->sbc.error || brq->cmd.error ||
	    brq->stop.error || brq->data.error) {
		switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
		case ERR_RETRY:
			return MMC_BLK_RETRY;
		case ERR_ABORT:
			return MMC_BLK_ABORT;
		case ERR_NOMEDIUM:
			return MMC_BLK_NOMEDIUM;
		case ERR_CONTINUE:
			break;
		}
	}

	/*
	 * Check for errors relating to the execution of the
	 * initial command - such as address errors.  No data
	 * has been transferred.
	 */
	if (brq->cmd.resp[0] & CMD_ERRORS) {
		pr_err("%s: r/w command failed, status = %#x\n",
		       req->rq_disk->disk_name, brq->cmd.resp[0]);
		return MMC_BLK_ABORT;
	}

	/*
	 * Everything else is either success, or a data error of some
	 * kind.  If it was a write, we may have transitioned to
	 * program mode, which we have to wait for to complete.
	 */
	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
		int err;

		/* Check stop command response */
		if (brq->stop.resp[0] & R1_ERROR) {
			pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
			       req->rq_disk->disk_name, __func__,
			       brq->stop.resp[0]);
			gen_err = true;
		}

		err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
				       &gen_err);
		if (err)
			return MMC_BLK_CMD_ERR;
	}

	/* If a general error occurs, retry the write operation. */
	if (gen_err) {
		pr_warn("%s: retrying write for general error\n",
			req->rq_disk->disk_name);
		return MMC_BLK_RETRY;
	}

	/* Some errors (ECC) are flagged on the next command, so check stop, too */
	if (brq->data.error || brq->stop.error) {
		if (need_retune && !brq->retune_retry_done) {
			pr_debug("%s: retrying because a re-tune was needed\n",
				 req->rq_disk->disk_name);
			brq->retune_retry_done = 1;
			return MMC_BLK_RETRY;
		}
		pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
		       req->rq_disk->disk_name, brq->data.error ?: brq->stop.error,
		       (unsigned)blk_rq_pos(req),
		       (unsigned)blk_rq_sectors(req),
		       brq->cmd.resp[0], brq->stop.resp[0]);

		if (rq_data_dir(req) == READ) {
			if (ecc_err)
				return MMC_BLK_ECC_ERR;
			return MMC_BLK_DATA_ERR;
		} else {
			return MMC_BLK_CMD_ERR;
		}
	}

	if (!brq->data.bytes_xfered)
		return MMC_BLK_RETRY;

	if (blk_rq_bytes(req) != brq->data.bytes_xfered)
		return MMC_BLK_PARTIAL;

	return MMC_BLK_SUCCESS;
}

static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
			      int disable_multi, bool *do_rel_wr,
			      bool *do_data_tag)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mmc_queue_req_to_req(mqrq);

	/*
	 * Reliable writes are used to implement Forced Unit Access and
	 * are supported only on MMCs.
	 */
	*do_rel_wr = (req->cmd_flags & REQ_FUA) &&
		     rq_data_dir(req) == WRITE &&
		     (md->flags & MMC_BLK_REL_WR);

	memset(brq, 0, sizeof(struct mmc_blk_request));

	brq->mrq.data = &brq->data;

	brq->stop.opcode = MMC_STOP_TRANSMISSION;
	brq->stop.arg = 0;

	if (rq_data_dir(req) == READ) {
		brq->data.flags = MMC_DATA_READ;
		brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		brq->data.flags = MMC_DATA_WRITE;
		brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	}

	brq->data.blksz = 512;
	brq->data.blocks = blk_rq_sectors(req);

	/*
	 * The block layer doesn't support all sector count
	 * restrictions, so we need to be prepared for requests
	 * that are too big.
	 */
	if (brq->data.blocks > card->host->max_blk_count)
		brq->data.blocks = card->host->max_blk_count;

	if (brq->data.blocks > 1) {
		/*
		 * After a read error, we redo the request one sector
		 * at a time in order to accurately determine which
		 * sectors can be read successfully.
		 */
		if (disable_multi)
			brq->data.blocks = 1;

		/*
		 * Some controllers have HW issues while operating
		 * in multiple I/O mode
		 */
		if (card->host->ops->multi_io_quirk)
			brq->data.blocks = card->host->ops->multi_io_quirk(card,
						(rq_data_dir(req) == READ) ?
						MMC_DATA_READ : MMC_DATA_WRITE,
						brq->data.blocks);
	}

	if (*do_rel_wr)
		mmc_apply_rel_rw(brq, card, req);

	/*
	 * Data tag is used only when writing metadata, to speed up the
	 * write and any subsequent reads of that metadata.
	 */
	*do_data_tag = card->ext_csd.data_tag_unit_size &&
		       (req->cmd_flags & REQ_META) &&
		       (rq_data_dir(req) == WRITE) &&
		       ((brq->data.blocks * brq->data.blksz) >=
			card->ext_csd.data_tag_unit_size);

	mmc_set_data_timeout(&brq->data, card);

	brq->data.sg = mqrq->sg;
	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

	/*
	 * Adjust the sg list so it is the same size as the
	 * request.
	 */
	if (brq->data.blocks != blk_rq_sectors(req)) {
		int i, data_size = brq->data.blocks << 9;
		struct scatterlist *sg;

		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
			data_size -= sg->length;
			if (data_size <= 0) {
				sg->length += data_size;
				i++;
				break;
			}
		}
		brq->data.sg_len = i;
	}

	mqrq->areq.mrq = &brq->mrq;
}

static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
			       struct mmc_card *card,
			       int disable_multi,
			       struct mmc_queue *mq)
{
	u32 readcmd, writecmd;
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mmc_queue_req_to_req(mqrq);
	struct mmc_blk_data *md = mq->blkdata;
	bool do_rel_wr, do_data_tag;

	mmc_blk_data_prep(mq, mqrq, disable_multi, &do_rel_wr, &do_data_tag);

	brq->mrq.cmd = &brq->cmd;

	brq->cmd.arg = blk_rq_pos(req);
	if (!mmc_card_blockaddr(card))
		brq->cmd.arg <<= 9;
	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	if (brq->data.blocks > 1 || do_rel_wr) {
		/* SPI multiblock writes terminate using a special
		 * token, not a STOP_TRANSMISSION request.
		 */
		if (!mmc_host_is_spi(card->host) ||
		    rq_data_dir(req) == READ)
			brq->mrq.stop = &brq->stop;
		readcmd = MMC_READ_MULTIPLE_BLOCK;
		writecmd = MMC_WRITE_MULTIPLE_BLOCK;
	} else {
		brq->mrq.stop = NULL;
		readcmd = MMC_READ_SINGLE_BLOCK;
		writecmd = MMC_WRITE_BLOCK;
	}
	brq->cmd.opcode = rq_data_dir(req) == READ ? readcmd : writecmd;

	/*
	 * Pre-defined multi-block transfers are preferable to
	 * open-ended ones (and necessary for reliable writes).
	 * However, it is not sufficient to just send CMD23,
	 * and avoid the final CMD12, as on an error condition
	 * CMD12 (stop) needs to be sent anyway.  This, coupled
	 * with Auto-CMD23 enhancements provided by some
	 * hosts, means that the complexity of dealing
	 * with this is best left to the host.  If CMD23 is
	 * supported by card and host, we'll fill sbc in and let
	 * the host deal with handling it correctly.  This means
	 * that for hosts that don't expose MMC_CAP_CMD23, no
	 * change of behavior will be observed.
	 *
	 * N.B.: Some MMC cards experience performance degradation.
	 * We'll avoid using CMD23-bounded multiblock writes for
	 * these, while retaining features like reliable writes.
	 */
	if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
	    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
	     do_data_tag)) {
		brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
		brq->sbc.arg = brq->data.blocks |
			(do_rel_wr ? (1 << 31) : 0) |
			(do_data_tag ? (1 << 29) : 0);
		brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
		brq->mrq.sbc = &brq->sbc;
	}

	mqrq->areq.err_check = mmc_blk_err_check;
}
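
/*
 * The SET_BLOCK_COUNT (CMD23) argument assembled above packs the block
 * count in the low bits, with bit 31 requesting a reliable write and
 * bit 29 tagging the data.  A worked example (hypothetical request): a
 * 64-block reliable write without data tag yields
 *
 *	0x40 | (1 << 31) | (0 << 29) == 0x80000040
 */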

static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
			       struct mmc_blk_request *brq, struct request *req,
			       bool old_req_pending)
{
	bool req_pending;

	/*
	 * If this is an SD card and we're writing, we can first
	 * mark the known good sectors as ok.
	 *
	 * If the card is not SD, we can still mark written sectors
	 * as ok, as reported by the controller (which might be less than
	 * the real number of written sectors, but never more).
	 */
	if (mmc_card_sd(card)) {
		u32 blocks;
		int err;

		err = mmc_sd_num_wr_blocks(card, &blocks);
		if (err)
			req_pending = old_req_pending;
		else
			req_pending = blk_end_request(req, BLK_STS_OK, blocks << 9);
	} else {
		req_pending = blk_end_request(req, BLK_STS_OK, brq->data.bytes_xfered);
	}
	return req_pending;
}

static void mmc_blk_rw_cmd_abort(struct mmc_queue *mq, struct mmc_card *card,
				 struct request *req,
				 struct mmc_queue_req *mqrq)
{
	if (mmc_card_removed(card))
		req->rq_flags |= RQF_QUIET;
	while (blk_end_request(req, BLK_STS_IOERR, blk_rq_cur_bytes(req)));
	mq->qcnt--;
}

/**
 * mmc_blk_rw_try_restart() - tries to restart the current async request
 * @mq: the queue with the card and host to restart
 * @req: a new request that wants to be started after the current one
 * @mqrq: the queue request to restart
 */
static void mmc_blk_rw_try_restart(struct mmc_queue *mq, struct request *req,
				   struct mmc_queue_req *mqrq)
{
	if (!req)
		return;

	/*
	 * If the card was removed, just cancel everything and return.
	 */
	if (mmc_card_removed(mq->card)) {
		req->rq_flags |= RQF_QUIET;
		blk_end_request_all(req, BLK_STS_IOERR);
		mq->qcnt--; /* FIXME: just set to 0? */
		return;
	}
	/* Else proceed and try to restart the current async request */
	mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
	mmc_start_areq(mq->card->host, &mqrq->areq, NULL);
}

static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request *brq;
	int disable_multi = 0, retry = 0, type, retune_retry_done = 0;
	enum mmc_blk_status status;
	struct mmc_queue_req *mqrq_cur = NULL;
	struct mmc_queue_req *mq_rq;
	struct request *old_req;
	struct mmc_async_req *new_areq;
	struct mmc_async_req *old_areq;
	bool req_pending = true;

	if (new_req) {
		mqrq_cur = req_to_mmc_queue_req(new_req);
		mq->qcnt++;
	}

	if (!mq->qcnt)
		return;

	do {
		if (new_req) {
			/*
			 * When a 4KB native sector is enabled, only reads
			 * and writes of a multiple of 8 blocks are allowed.
			 */
			if (mmc_large_sector(card) &&
			    !IS_ALIGNED(blk_rq_sectors(new_req), 8)) {
				pr_err("%s: Transfer size is not 4KB sector size aligned\n",
				       new_req->rq_disk->disk_name);
				mmc_blk_rw_cmd_abort(mq, card, new_req, mqrq_cur);
				return;
			}

			mmc_blk_rw_rq_prep(mqrq_cur, card, 0, mq);
			new_areq = &mqrq_cur->areq;
		} else
			new_areq = NULL;

		old_areq = mmc_start_areq(card->host, new_areq, &status);
		if (!old_areq) {
			/*
			 * We have just put the first request into the pipeline
			 * and there is nothing more to do until it is
			 * complete.
			 */
			return;
		}

		/*
		 * An asynchronous request has been completed and we proceed
		 * to handle the result of it.
		 */
		mq_rq = container_of(old_areq, struct mmc_queue_req, areq);
		brq = &mq_rq->brq;
		old_req = mmc_queue_req_to_req(mq_rq);
		type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;

		switch (status) {
		case MMC_BLK_SUCCESS:
		case MMC_BLK_PARTIAL:
			/*
			 * A block was successfully transferred.
			 */
			mmc_blk_reset_success(md, type);

			req_pending = blk_end_request(old_req, BLK_STS_OK,
						      brq->data.bytes_xfered);
			/*
			 * If the blk_end_request function returns non-zero even
			 * though all data has been transferred and no errors
			 * were returned by the host controller, it's a bug.
			 */
			if (status == MMC_BLK_SUCCESS && req_pending) {
				pr_err("%s BUG rq_tot %d d_xfer %d\n",
				       __func__, blk_rq_bytes(old_req),
				       brq->data.bytes_xfered);
				mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
				return;
			}
			break;
		case MMC_BLK_CMD_ERR:
			req_pending = mmc_blk_rw_cmd_err(md, card, brq, old_req, req_pending);
			if (mmc_blk_reset(md, card->host, type)) {
				if (req_pending)
					mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
				else
					mq->qcnt--;
				mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
				return;
			}
			if (!req_pending) {
				mq->qcnt--;
				mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
				return;
			}
			break;
		case MMC_BLK_RETRY:
			retune_retry_done = brq->retune_retry_done;
			if (retry++ < 5)
				break;
			/* Fall through */
		case MMC_BLK_ABORT:
			if (!mmc_blk_reset(md, card->host, type))
				break;
			mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
			mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
			return;
		case MMC_BLK_DATA_ERR: {
			int err;

			err = mmc_blk_reset(md, card->host, type);
			if (!err)
				break;
			if (err == -ENODEV) {
				mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
				mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
				return;
			}
			/* Fall through */
		}
		case MMC_BLK_ECC_ERR:
			if (brq->data.blocks > 1) {
				/* Redo read one sector at a time */
				pr_warn("%s: retrying using single block read\n",
					old_req->rq_disk->disk_name);
				disable_multi = 1;
				break;
			}
			/*
			 * After an error, we redo I/O one sector at a
			 * time, so we only reach here after trying to
			 * read a single sector.
			 */
			req_pending = blk_end_request(old_req, BLK_STS_IOERR,
						      brq->data.blksz);
			if (!req_pending) {
				mq->qcnt--;
				mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
				return;
			}
			break;
		case MMC_BLK_NOMEDIUM:
			mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
			mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
			return;
		default:
			pr_err("%s: Unhandled return value (%d)",
			       old_req->rq_disk->disk_name, status);
			mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
			mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
			return;
		}

		if (req_pending) {
			/*
			 * In case of an incomplete request
			 * prepare it again and resend.
			 */
			mmc_blk_rw_rq_prep(mq_rq, card,
					   disable_multi, mq);
			mmc_start_areq(card->host,
				       &mq_rq->areq, NULL);
			mq_rq->brq.retune_retry_done = retune_retry_done;
		}
	} while (req_pending);

	mq->qcnt--;
}

void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	int ret;
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;

	if (req && !mq->qcnt)
		/* claim host only for the first request */
		mmc_get_card(card);

	ret = mmc_blk_part_switch(card, md->part_type);
	if (ret) {
		if (req) {
			blk_end_request_all(req, BLK_STS_IOERR);
		}
		goto out;
	}

	if (req) {
		switch (req_op(req)) {
		case REQ_OP_DRV_IN:
		case REQ_OP_DRV_OUT:
			/*
			 * Complete ongoing async transfer before issuing
			 * ioctl()s
			 */
			if (mq->qcnt)
				mmc_blk_issue_rw_rq(mq, NULL);
			mmc_blk_issue_drv_op(mq, req);
			break;
		case REQ_OP_DISCARD:
			/*
			 * Complete ongoing async transfer before issuing
			 * discard.
			 */
			if (mq->qcnt)
				mmc_blk_issue_rw_rq(mq, NULL);
			mmc_blk_issue_discard_rq(mq, req);
			break;
		case REQ_OP_SECURE_ERASE:
			/*
			 * Complete ongoing async transfer before issuing
			 * secure erase.
			 */
			if (mq->qcnt)
				mmc_blk_issue_rw_rq(mq, NULL);
			mmc_blk_issue_secdiscard_rq(mq, req);
			break;
		case REQ_OP_FLUSH:
			/*
			 * Complete ongoing async transfer before issuing
			 * flush.
			 */
			if (mq->qcnt)
				mmc_blk_issue_rw_rq(mq, NULL);
			mmc_blk_issue_flush(mq, req);
			break;
		default:
			/* Normal request, just issue it */
			mmc_blk_issue_rw_rq(mq, req);
			card->host->context_info.is_waiting_last_req = false;
			break;
		}
	} else {
		/* No request, flushing the pipeline with NULL */
		mmc_blk_issue_rw_rq(mq, NULL);
		card->host->context_info.is_waiting_last_req = false;
	}

out:
	if (!mq->qcnt)
		mmc_put_card(card);
}

static inline int mmc_blk_readonly(struct mmc_card *card)
{
        return mmc_card_readonly(card) ||
               !(card->csd.cmdclass & CCC_BLOCK_WRITE);
}

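/*
 * Allocate and initialize one mmc_blk_data instance: reserve a device
 * index from the IDA, allocate the gendisk and the request queue, wire
 * them together, and derive the disk's minor range from the index
 * (first_minor = devidx * perdev_minors). Used for the main user data
 * area as well as for the physical partitions handled further below.
 */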
static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
                                              struct device *parent,
                                              sector_t size,
                                              bool default_ro,
                                              const char *subname,
                                              int area_type)
{
        struct mmc_blk_data *md;
        int devidx, ret;

        devidx = ida_simple_get(&mmc_blk_ida, 0, max_devices, GFP_KERNEL);
        if (devidx < 0) {
                /*
                 * We get -ENOSPC when no more devidx values are
                 * available, either because userspace has not yet
                 * unmounted the partitions, which postpones
                 * mmc_blk_release() from being called, or because the
                 * device has more partitions than we support.
                 */
                if (devidx == -ENOSPC)
                        dev_err(mmc_dev(card->host),
                                "no more device IDs available\n");

                return ERR_PTR(devidx);
        }

        md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
        if (!md) {
                ret = -ENOMEM;
                goto out;
        }

        md->area_type = area_type;

        /*
         * Set the read-only status based on the supported commands
         * and the write protect switch.
         */
        md->read_only = mmc_blk_readonly(card);

        md->disk = alloc_disk(perdev_minors);
        if (!md->disk) {
                ret = -ENOMEM;
                goto err_kfree;
        }

        spin_lock_init(&md->lock);
        INIT_LIST_HEAD(&md->part);
        md->usage = 1;

        ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
        if (ret)
                goto err_putdisk;

        md->queue.blkdata = md;

        md->disk->major = MMC_BLOCK_MAJOR;
        md->disk->first_minor = devidx * perdev_minors;
        md->disk->fops = &mmc_bdops;
        md->disk->private_data = md;
        md->disk->queue = md->queue.queue;
        md->parent = parent;
        set_disk_ro(md->disk, md->read_only || default_ro);
        md->disk->flags = GENHD_FL_EXT_DEVT;
        if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
                md->disk->flags |= GENHD_FL_NO_PART_SCAN;

        /*
         * As discussed on lkml, GENHD_FL_REMOVABLE should:
         *
         * - be set for removable media with permanent block devices
         * - be unset for removable block devices with permanent media
         *
         * Since MMC block devices clearly fall under the second
         * case, we do not set GENHD_FL_REMOVABLE. Userspace
         * should use the block device creation/destruction hotplug
         * messages to tell when the card is present.
         */

        snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
                 "mmcblk%u%s", card->host->index, subname ? subname : "");

        if (mmc_card_mmc(card))
                blk_queue_logical_block_size(md->queue.queue,
                                             card->ext_csd.data_sector_size);
        else
                blk_queue_logical_block_size(md->queue.queue, 512);

        set_capacity(md->disk, size);

        if (mmc_host_cmd23(card->host)) {
                if ((mmc_card_mmc(card) &&
                     card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
                    (mmc_card_sd(card) &&
                     card->scr.cmds & SD_SCR_CMD23_SUPPORT))
                        md->flags |= MMC_BLK_CMD23;
        }

        if (mmc_card_mmc(card) &&
            md->flags & MMC_BLK_CMD23 &&
            ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
             card->ext_csd.rel_sectors)) {
                md->flags |= MMC_BLK_REL_WR;
                blk_queue_write_cache(md->queue.queue, true, true);
        }

        return md;

 err_putdisk:
        put_disk(md->disk);
 err_kfree:
        kfree(md);
 out:
        ida_simple_remove(&mmc_blk_ida, devidx);
        return ERR_PTR(ret);
}

static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
        sector_t size;

        if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
                /*
                 * The EXT_CSD sector count is in number of 512 byte
                 * sectors.
                 */
                size = card->ext_csd.sectors;
        } else {
                /*
                 * The CSD capacity field is in units of read_blkbits.
                 * set_capacity takes units of 512 bytes.
                 */
                size = (typeof(sector_t))card->csd.capacity
                        << (card->csd.read_blkbits - 9);
        }
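        /*
         * Worked example for the CSD conversion above (values for
         * illustration only): csd.capacity = 0x3000 with
         * read_blkbits = 11 (2048-byte blocks) gives
         * 0x3000 << (11 - 9) = 0xc000 512-byte sectors, i.e. 24 MiB.
         */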

        return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
                                 MMC_BLK_DATA_AREA_MAIN);
}

static int mmc_blk_alloc_part(struct mmc_card *card,
                              struct mmc_blk_data *md,
                              unsigned int part_type,
                              sector_t size,
                              bool default_ro,
                              const char *subname,
                              int area_type)
{
        char cap_str[10];
        struct mmc_blk_data *part_md;

        part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
                                    subname, area_type);
        if (IS_ERR(part_md))
                return PTR_ERR(part_md);
        part_md->part_type = part_type;
        list_add(&part_md->part, &md->part);

        string_get_size((u64)get_capacity(part_md->disk), 512, STRING_UNITS_2,
                        cap_str, sizeof(cap_str));
        pr_info("%s: %s %s partition %u %s\n",
                part_md->disk->disk_name, mmc_card_id(card),
                mmc_card_name(card), part_md->part_type, cap_str);
        return 0;
}

/*
 * MMC physical partitions consist of two boot partitions and
 * up to four general purpose partitions.
 * For each partition enabled in EXT_CSD a block device will be allocated
 * to provide access to the partition.
 */
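/*
 * The resulting devices typically appear as the parent disk's name plus
 * the partition subname, e.g. mmcblk0boot0, mmcblk0boot1, mmcblk0rpmb
 * or mmcblk0gp0 (examples for illustration; actual names depend on the
 * host index and on card->part[].name).
 */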

static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
{
        int idx, ret = 0;

        if (!mmc_card_mmc(card))
                return 0;

        for (idx = 0; idx < card->nr_parts; idx++) {
                if (card->part[idx].size) {
                        ret = mmc_blk_alloc_part(card, md,
                                card->part[idx].part_cfg,
                                card->part[idx].size >> 9,
                                card->part[idx].force_ro,
                                card->part[idx].name,
                                card->part[idx].area_type);
                        if (ret)
                                return ret;
                }
        }

        return ret;
}

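/*
 * Tear down one block device: mark the queue as bypassed and dying so
 * no new requests are accepted, flush and free the queue, remove the
 * sysfs attributes, delete the gendisk and finally drop our usage
 * reference (the mmc_blk_data itself is freed once the last opener
 * releases it).
 */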
static void mmc_blk_remove_req(struct mmc_blk_data *md)
{
        struct mmc_card *card;

        if (md) {
                /*
                 * Flush remaining requests and free queues. It
                 * is freeing the queue that stops new requests
                 * from being accepted.
                 */
                card = md->queue.card;
                spin_lock_irq(md->queue.queue->queue_lock);
                queue_flag_set(QUEUE_FLAG_BYPASS, md->queue.queue);
                spin_unlock_irq(md->queue.queue->queue_lock);
                blk_set_queue_dying(md->queue.queue);
                mmc_cleanup_queue(&md->queue);
                if (md->disk->flags & GENHD_FL_UP) {
                        device_remove_file(disk_to_dev(md->disk), &md->force_ro);
                        if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
                            card->ext_csd.boot_ro_lockable)
                                device_remove_file(disk_to_dev(md->disk),
                                                   &md->power_ro_lock);

                        del_gendisk(md->disk);
                }
                mmc_blk_put(md);
        }
}

static void mmc_blk_remove_parts(struct mmc_card *card,
                                 struct mmc_blk_data *md)
{
        struct list_head *pos, *q;
        struct mmc_blk_data *part_md;

        list_for_each_safe(pos, q, &md->part) {
                part_md = list_entry(pos, struct mmc_blk_data, part);
                list_del(pos);
                mmc_blk_remove_req(part_md);
        }
}

static int mmc_add_disk(struct mmc_blk_data *md)
{
        int ret;
        struct mmc_card *card = md->queue.card;

        device_add_disk(md->parent, md->disk);
        md->force_ro.show = force_ro_show;
        md->force_ro.store = force_ro_store;
        sysfs_attr_init(&md->force_ro.attr);
        md->force_ro.attr.name = "force_ro";
        md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
        ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
        if (ret)
                goto force_ro_fail;

        if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
            card->ext_csd.boot_ro_lockable) {
                umode_t mode;

                if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
                        mode = S_IRUGO;
                else
                        mode = S_IRUGO | S_IWUSR;

                md->power_ro_lock.show = power_ro_lock_show;
                md->power_ro_lock.store = power_ro_lock_store;
                sysfs_attr_init(&md->power_ro_lock.attr);
                md->power_ro_lock.attr.mode = mode;
                md->power_ro_lock.attr.name =
                                "ro_lock_until_next_power_on";
                ret = device_create_file(disk_to_dev(md->disk),
                                         &md->power_ro_lock);
                if (ret)
                        goto power_ro_lock_fail;
        }
        return ret;

power_ro_lock_fail:
        device_remove_file(disk_to_dev(md->disk), &md->force_ro);
force_ro_fail:
        del_gendisk(md->disk);

        return ret;
}
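
/*
 * The attributes created above appear under the disk's sysfs directory.
 * Example usage (device names for illustration only):
 *   echo 1 > /sys/block/mmcblk0boot0/force_ro
 *   echo 0 > /sys/block/mmcblk0boot0/force_ro
 * and, for lockable boot areas only:
 *   echo 1 > /sys/block/mmcblk0boot0/ro_lock_until_next_power_on
 */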

#ifdef CONFIG_DEBUG_FS

static int mmc_dbg_card_status_get(void *data, u64 *val)
{
        struct mmc_card *card = data;
        struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
        struct mmc_queue *mq = &md->queue;
        struct request *req;
        int ret;

        /* Ask the block layer about the card status */
        req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
        req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
        blk_execute_rq(mq->queue, NULL, req, 0);
        ret = req_to_mmc_queue_req(req)->drv_op_result;
        if (ret >= 0) {
                *val = ret;
                ret = 0;
        }

        return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get,
                        NULL, "%08llx\n");

/*
 * Each of the 512 EXT_CSD bytes is dumped as two hex digits, plus one
 * character for the trailing newline: 512 * 2 + 1 = 1025.
 */
#define EXT_CSD_STR_LEN 1025

static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
{
        struct mmc_card *card = inode->i_private;
        struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
        struct mmc_queue *mq = &md->queue;
        struct request *req;
        char *buf;
        ssize_t n = 0;
        u8 *ext_csd;
        int err, i;

        buf = kmalloc(EXT_CSD_STR_LEN + 1, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        /* Ask the block layer for the EXT CSD */
        req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
        req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
        req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
        blk_execute_rq(mq->queue, NULL, req, 0);
        err = req_to_mmc_queue_req(req)->drv_op_result;
        if (err) {
                pr_err("failed to get EXT_CSD, err %d\n", err);
                goto out_free;
        }

        for (i = 0; i < 512; i++)
                n += sprintf(buf + n, "%02x", ext_csd[i]);
        n += sprintf(buf + n, "\n");

        if (n != EXT_CSD_STR_LEN) {
                err = -EINVAL;
                goto out_free;
        }

        filp->private_data = buf;
        kfree(ext_csd);
        return 0;

out_free:
        kfree(buf);
        return err;
}

static ssize_t mmc_ext_csd_read(struct file *filp, char __user *ubuf,
                                size_t cnt, loff_t *ppos)
{
        char *buf = filp->private_data;

        return simple_read_from_buffer(ubuf, cnt, ppos,
                                       buf, EXT_CSD_STR_LEN);
}

static int mmc_ext_csd_release(struct inode *inode, struct file *file)
{
        kfree(file->private_data);
        return 0;
}

static const struct file_operations mmc_dbg_ext_csd_fops = {
        .open           = mmc_ext_csd_open,
        .read           = mmc_ext_csd_read,
        .release        = mmc_ext_csd_release,
        .llseek         = default_llseek,
};

static int mmc_blk_add_debugfs(struct mmc_card *card)
{
        struct dentry *root;

        if (!card->debugfs_root)
                return 0;

        root = card->debugfs_root;

        if (mmc_card_mmc(card) || mmc_card_sd(card)) {
                if (!debugfs_create_file("status", S_IRUSR, root, card,
                                         &mmc_dbg_card_status_fops))
                        return -EIO;
        }

        if (mmc_card_mmc(card)) {
                if (!debugfs_create_file("ext_csd", S_IRUSR, root, card,
                                         &mmc_dbg_ext_csd_fops))
                        return -EIO;
        }

        return 0;
}
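
/*
 * With CONFIG_DEBUG_FS the files created above typically show up under
 * the card's debugfs directory, e.g. (paths for illustration, they
 * depend on the host and card numbering):
 *   /sys/kernel/debug/mmc0/mmc0:0001/status   - card status as %08llx
 *   /sys/kernel/debug/mmc0/mmc0:0001/ext_csd  - 512 EXT_CSD bytes in hex
 */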

#else

static int mmc_blk_add_debugfs(struct mmc_card *card)
{
        return 0;
}

#endif /* CONFIG_DEBUG_FS */

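/*
 * Driver probe: verify the card supports block reads, apply card
 * quirks, allocate the main block device plus one per enabled physical
 * partition, register the disks and debugfs entries, and enable
 * runtime PM with autosuspend (except for SD-combo cards, which defer
 * that decision to the SDIO init sequence).
 */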
static int mmc_blk_probe(struct mmc_card *card)
{
        struct mmc_blk_data *md, *part_md;
        char cap_str[10];

        /*
         * Check that the card supports the command class(es) we need.
         */
        if (!(card->csd.cmdclass & CCC_BLOCK_READ))
                return -ENODEV;

        mmc_fixup_device(card, mmc_blk_fixups);

        md = mmc_blk_alloc(card);
        if (IS_ERR(md))
                return PTR_ERR(md);

        string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
                        cap_str, sizeof(cap_str));
        pr_info("%s: %s %s %s %s\n",
                md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
                cap_str, md->read_only ? "(ro)" : "");

        if (mmc_blk_alloc_parts(card, md))
                goto out;

        dev_set_drvdata(&card->dev, md);

        if (mmc_add_disk(md))
                goto out;

        list_for_each_entry(part_md, &md->part, part) {
                if (mmc_add_disk(part_md))
                        goto out;
        }

        /* Add two debugfs entries */
        mmc_blk_add_debugfs(card);

        pm_runtime_set_autosuspend_delay(&card->dev, 3000);
        pm_runtime_use_autosuspend(&card->dev);

        /*
         * Don't enable runtime PM for SD-combo cards here. Leave that
         * decision to be taken during the SDIO init sequence instead.
         */
        if (card->type != MMC_TYPE_SD_COMBO) {
                pm_runtime_set_active(&card->dev);
                pm_runtime_enable(&card->dev);
        }

        return 0;

 out:
        mmc_blk_remove_parts(card, md);
        mmc_blk_remove_req(md);
        dev_set_drvdata(&card->dev, NULL);
        return -ENODEV;
}

static void mmc_blk_remove(struct mmc_card *card)
{
        struct mmc_blk_data *md = dev_get_drvdata(&card->dev);

        mmc_blk_remove_parts(card, md);
        pm_runtime_get_sync(&card->dev);
        mmc_claim_host(card->host);
        mmc_blk_part_switch(card, md->part_type);
        mmc_release_host(card->host);
        if (card->type != MMC_TYPE_SD_COMBO)
                pm_runtime_disable(&card->dev);
        pm_runtime_put_noidle(&card->dev);
        mmc_blk_remove_req(md);
        dev_set_drvdata(&card->dev, NULL);
}

static int _mmc_blk_suspend(struct mmc_card *card)
{
        struct mmc_blk_data *part_md;
        struct mmc_blk_data *md = dev_get_drvdata(&card->dev);

        if (md) {
                mmc_queue_suspend(&md->queue);
                list_for_each_entry(part_md, &md->part, part) {
                        mmc_queue_suspend(&part_md->queue);
                }
        }
        return 0;
}

static void mmc_blk_shutdown(struct mmc_card *card)
{
        _mmc_blk_suspend(card);
}

#ifdef CONFIG_PM_SLEEP
static int mmc_blk_suspend(struct device *dev)
{
        struct mmc_card *card = mmc_dev_to_card(dev);

        return _mmc_blk_suspend(card);
}

static int mmc_blk_resume(struct device *dev)
{
        struct mmc_blk_data *part_md;
        struct mmc_blk_data *md = dev_get_drvdata(dev);

        if (md) {
                /*
                 * Resume involves the card going into idle state,
                 * so current partition is always the main one.
                 */
                md->part_curr = md->part_type;
                mmc_queue_resume(&md->queue);
                list_for_each_entry(part_md, &md->part, part) {
                        mmc_queue_resume(&part_md->queue);
                }
        }
        return 0;
}
#endif

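/*
 * SIMPLE_DEV_PM_OPS only references the suspend/resume callbacks when
 * CONFIG_PM_SLEEP is set, matching the #ifdef guard above, so this
 * builds cleanly either way.
 */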
static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume);

static struct mmc_driver mmc_driver = {
        .drv            = {
                .name   = "mmcblk",
                .pm     = &mmc_blk_pm_ops,
        },
        .probe          = mmc_blk_probe,
        .remove         = mmc_blk_remove,
        .shutdown       = mmc_blk_shutdown,
};

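/*
 * perdev_minors can be overridden at boot time, e.g. with
 * "mmcblk.perdev_minors=16" on the kernel command line (value for
 * illustration); max_devices is then capped below so that all
 * per-device minor ranges still fit into the single block major.
 */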
static int __init mmc_blk_init(void)
{
        int res;

        if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
                pr_info("mmcblk: using %d minors per device\n", perdev_minors);

        max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors);

        res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
        if (res)
                goto out;

        res = mmc_register_driver(&mmc_driver);
        if (res)
                goto out2;

        return 0;
 out2:
        unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
 out:
        return res;
}

static void __exit mmc_blk_exit(void)
{
        mmc_unregister_driver(&mmc_driver);
        unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}

module_init(mmc_blk_init);
module_exit(mmc_blk_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");