1 /*
2  * Block driver for media (i.e., flash cards)
3  *
4  * Copyright 2002 Hewlett-Packard Company
5  * Copyright 2005-2008 Pierre Ossman
6  *
7  * Use consistent with the GNU GPL is permitted,
8  * provided that this copyright notice is
9  * preserved in its entirety in all copies and derived works.
10  *
11  * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
12  * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
13  * FITNESS FOR ANY PARTICULAR PURPOSE.
14  *
15  * Many thanks to Alessandro Rubini and Jonathan Corbet!
16  *
17  * Author:  Andrew Christian
18  *          28 May 2002
19  */
20 #include <linux/moduleparam.h>
21 #include <linux/module.h>
22 #include <linux/init.h>
23
24 #include <linux/kernel.h>
25 #include <linux/fs.h>
26 #include <linux/slab.h>
27 #include <linux/errno.h>
28 #include <linux/hdreg.h>
29 #include <linux/kdev_t.h>
30 #include <linux/blkdev.h>
31 #include <linux/mutex.h>
32 #include <linux/scatterlist.h>
33 #include <linux/string_helpers.h>
34 #include <linux/delay.h>
35 #include <linux/capability.h>
36 #include <linux/compat.h>
37 #include <linux/pm_runtime.h>
38
39 #include <linux/mmc/ioctl.h>
40 #include <linux/mmc/card.h>
41 #include <linux/mmc/host.h>
42 #include <linux/mmc/mmc.h>
43 #include <linux/mmc/sd.h>
44
45 #include <asm/uaccess.h>
46
47 #include "queue.h"
48
49 MODULE_ALIAS("mmc:block");
50 #ifdef MODULE_PARAM_PREFIX
51 #undef MODULE_PARAM_PREFIX
52 #endif
53 #define MODULE_PARAM_PREFIX "mmcblk."
54
55 #define INAND_CMD38_ARG_EXT_CSD  113
56 #define INAND_CMD38_ARG_ERASE    0x00
57 #define INAND_CMD38_ARG_TRIM     0x01
58 #define INAND_CMD38_ARG_SECERASE 0x80
59 #define INAND_CMD38_ARG_SECTRIM1 0x81
60 #define INAND_CMD38_ARG_SECTRIM2 0x88
61 #define MMC_BLK_TIMEOUT_MS  (10 * 60 * 1000)        /* 10 minute timeout */
62 #define MMC_SANITIZE_REQ_TIMEOUT 240000
63 #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
64
65 #define mmc_req_rel_wr(req)     (((req->cmd_flags & REQ_FUA) || \
66                                   (req->cmd_flags & REQ_META)) && \
67                                   (rq_data_dir(req) == WRITE))
68 #define PACKED_CMD_VER  0x01
69 #define PACKED_CMD_WR   0x02
70
71 static DEFINE_MUTEX(block_mutex);
72
73 /*
74  * The defaults come from config options but can be overridden by module
75  * or bootarg options.
76  */
77 static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
78
79 /*
80  * We've only got one major, so the number of mmcblk devices is
81  * limited to (1 << 20) / number of minors per device.  It is also
82  * currently limited by the size of the static bitmaps below.
83  */
84 static int max_devices;
85
86 #define MAX_DEVICES 256
87
88 /* TODO: Replace these with struct ida */
89 static DECLARE_BITMAP(dev_use, MAX_DEVICES);
90 static DECLARE_BITMAP(name_use, MAX_DEVICES);
91
92 /*
93  * There is one mmc_blk_data per slot.
94  */
95 struct mmc_blk_data {
96         spinlock_t      lock;
97         struct gendisk  *disk;
98         struct mmc_queue queue;
99         struct list_head part;
100
101         unsigned int    flags;
102 #define MMC_BLK_CMD23   (1 << 0)        /* Can do SET_BLOCK_COUNT for multiblock */
103 #define MMC_BLK_REL_WR  (1 << 1)        /* MMC Reliable write support */
104 #define MMC_BLK_PACKED_CMD      (1 << 2)        /* MMC packed command support */
105
106         unsigned int    usage;
107         unsigned int    read_only;
108         unsigned int    part_type;
109         unsigned int    name_idx;
110         unsigned int    reset_done;
111 #define MMC_BLK_READ            BIT(0)
112 #define MMC_BLK_WRITE           BIT(1)
113 #define MMC_BLK_DISCARD         BIT(2)
114 #define MMC_BLK_SECDISCARD      BIT(3)
115
116         /*
117          * Only set in the main mmc_blk_data associated with the
118          * mmc_card via dev_set_drvdata; it keeps track of the
119          * currently selected device partition.
120          */
121         unsigned int    part_curr;
122         struct device_attribute force_ro;
123         struct device_attribute power_ro_lock;
124         int     area_type;
125 };
126
127 static DEFINE_MUTEX(open_lock);
128
129 enum {
130         MMC_PACKED_NR_IDX = -1,
131         MMC_PACKED_NR_ZERO,
132         MMC_PACKED_NR_SINGLE,
133 };
134
135 module_param(perdev_minors, int, 0444);
136 MODULE_PARM_DESC(perdev_minors, "Minor numbers to allocate per device");
137
138 static inline int mmc_blk_part_switch(struct mmc_card *card,
139                                       struct mmc_blk_data *md);
140 static int get_card_status(struct mmc_card *card, u32 *status, int retries);
141
142 static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
143 {
144         struct mmc_packed *packed = mqrq->packed;
145
146         BUG_ON(!packed);
147
148         mqrq->cmd_type = MMC_PACKED_NONE;
149         packed->nr_entries = MMC_PACKED_NR_ZERO;
150         packed->idx_failure = MMC_PACKED_NR_IDX;
151         packed->retries = 0;
152         packed->blocks = 0;
153 }
154
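/*
 * Take a reference on the mmc_blk_data bound to @disk.  Returns NULL if
 * the device is already being torn down (its usage count has hit zero).
 */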
155 static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
156 {
157         struct mmc_blk_data *md;
158
159         mutex_lock(&open_lock);
160         md = disk->private_data;
161         if (md && md->usage == 0)
162                 md = NULL;
163         if (md)
164                 md->usage++;
165         mutex_unlock(&open_lock);
166
167         return md;
168 }
169
170 static inline int mmc_get_devidx(struct gendisk *disk)
171 {
172         int devmaj = MAJOR(disk_devt(disk));
173         int devidx = MINOR(disk_devt(disk)) / perdev_minors;
174
175         if (!devmaj)
176                 devidx = disk->first_minor / perdev_minors;
177         return devidx;
178 }
179
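/*
 * Drop a reference taken with mmc_blk_get(); on the final put, release
 * the request queue, the device index and the gendisk, and free @md.
 */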
180 static void mmc_blk_put(struct mmc_blk_data *md)
181 {
182         mutex_lock(&open_lock);
183         md->usage--;
184         if (md->usage == 0) {
185                 int devidx = mmc_get_devidx(md->disk);
186                 blk_cleanup_queue(md->queue.queue);
187
188                 __clear_bit(devidx, dev_use);
189
190                 put_disk(md->disk);
191                 kfree(md);
192         }
193         mutex_unlock(&open_lock);
194 }
195
196 static ssize_t power_ro_lock_show(struct device *dev,
197                 struct device_attribute *attr, char *buf)
198 {
199         int ret;
200         struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
201         struct mmc_card *card = md->queue.card;
202         int locked = 0;
203
204         if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
205                 locked = 2;
206         else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
207                 locked = 1;
208
209         ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
210
211         mmc_blk_put(md);
212
213         return ret;
214 }
215
216 static ssize_t power_ro_lock_store(struct device *dev,
217                 struct device_attribute *attr, const char *buf, size_t count)
218 {
219         int ret;
220         struct mmc_blk_data *md, *part_md;
221         struct mmc_card *card;
222         unsigned long set;
223
224         if (kstrtoul(buf, 0, &set))
225                 return -EINVAL;
226
227         if (set != 1)
228                 return count;
229
230         md = mmc_blk_get(dev_to_disk(dev));
231         card = md->queue.card;
232
233         mmc_get_card(card);
234
235         ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
236                                 card->ext_csd.boot_ro_lock |
237                                 EXT_CSD_BOOT_WP_B_PWR_WP_EN,
238                                 card->ext_csd.part_time);
239         if (ret)
240                 pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
241         else
242                 card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;
243
244         mmc_put_card(card);
245
246         if (!ret) {
247                 pr_info("%s: Locking boot partition ro until next power on\n",
248                         md->disk->disk_name);
249                 set_disk_ro(md->disk, 1);
250
251                 list_for_each_entry(part_md, &md->part, part)
252                         if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
253                                 pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
254                                 set_disk_ro(part_md->disk, 1);
255                         }
256         }
257
258         mmc_blk_put(md);
259         return count;
260 }
261
262 static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
263                              char *buf)
264 {
265         int ret;
266         struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
267
268         ret = snprintf(buf, PAGE_SIZE, "%d\n",
269                        get_disk_ro(dev_to_disk(dev)) ^
270                        md->read_only);
271         mmc_blk_put(md);
272         return ret;
273 }
274
275 static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
276                               const char *buf, size_t count)
277 {
278         int ret;
279         char *end;
280         struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
281         unsigned long set = simple_strtoul(buf, &end, 0);
282         if (end == buf) {
283                 ret = -EINVAL;
284                 goto out;
285         }
286
287         set_disk_ro(dev_to_disk(dev), set || md->read_only);
288         ret = count;
289 out:
290         mmc_blk_put(md);
291         return ret;
292 }
293
294 static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
295 {
296         struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
297         int ret = -ENXIO;
298
299         mutex_lock(&block_mutex);
300         if (md) {
301                 if (md->usage == 2)
302                         check_disk_change(bdev);
303                 ret = 0;
304
305                 if ((mode & FMODE_WRITE) && md->read_only) {
306                         mmc_blk_put(md);
307                         ret = -EROFS;
308                 }
309         }
310         mutex_unlock(&block_mutex);
311
312         return ret;
313 }
314
315 static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
316 {
317         struct mmc_blk_data *md = disk->private_data;
318
319         mutex_lock(&block_mutex);
320         mmc_blk_put(md);
321         mutex_unlock(&block_mutex);
322 }
323
324 static int
325 mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
326 {
327         geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
328         geo->heads = 4;
329         geo->sectors = 16;
330         return 0;
331 }
332
333 struct mmc_blk_ioc_data {
334         struct mmc_ioc_cmd ic;
335         unsigned char *buf;
336         u64 buf_bytes;
337 };
338
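/*
 * Copy the ioctl command descriptor and, when one is attached, its data
 * buffer from user space.  Returns an ERR_PTR() on failure.
 */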
339 static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
340         struct mmc_ioc_cmd __user *user)
341 {
342         struct mmc_blk_ioc_data *idata;
343         int err;
344
345         idata = kzalloc(sizeof(*idata), GFP_KERNEL);
346         if (!idata) {
347                 err = -ENOMEM;
348                 goto out;
349         }
350
351         if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
352                 err = -EFAULT;
353                 goto idata_err;
354         }
355
356         idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
357         if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
358                 err = -EOVERFLOW;
359                 goto idata_err;
360         }
361
362         if (!idata->buf_bytes)
363                 return idata;
364
365         idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
366         if (!idata->buf) {
367                 err = -ENOMEM;
368                 goto idata_err;
369         }
370
371         if (copy_from_user(idata->buf, (void __user *)(unsigned long)
372                                         idata->ic.data_ptr, idata->buf_bytes)) {
373                 err = -EFAULT;
374                 goto copy_err;
375         }
376
377         return idata;
378
379 copy_err:
380         kfree(idata->buf);
381 idata_err:
382         kfree(idata);
383 out:
384         return ERR_PTR(err);
385 }
386
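/*
 * Poll CMD13 (SEND_STATUS) until the RPMB programming operation has
 * completed, giving up after @retries_max attempts.
 */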
387 static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
388                                        u32 retries_max)
389 {
390         int err;
391         u32 retry_count = 0;
392
393         if (!status || !retries_max)
394                 return -EINVAL;
395
396         do {
397                 err = get_card_status(card, status, 5);
398                 if (err)
399                         break;
400
401                 if (!R1_STATUS(*status) &&
402                                 (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
403                         break; /* RPMB programming operation complete */
404
405                 /*
406                  * Reschedule to give the MMC device a chance to continue
407                  * processing the previous command without being polled too
408                  * frequently.
409                  */
410                 usleep_range(1000, 5000);
411         } while (++retry_count < retries_max);
412
413         if (retry_count == retries_max)
414                 err = -EPERM;
415
416         return err;
417 }
418
419 static int ioctl_do_sanitize(struct mmc_card *card)
420 {
421         int err;
422
423         if (!mmc_can_sanitize(card)) {
424                 pr_warn("%s: %s - SANITIZE is not supported\n",
425                         mmc_hostname(card->host), __func__);
426                 err = -EOPNOTSUPP;
427                 goto out;
428         }
429
430         pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
431                 mmc_hostname(card->host), __func__);
432
433         err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
434                                         EXT_CSD_SANITIZE_START, 1,
435                                         MMC_SANITIZE_REQ_TIMEOUT);
436
437         if (err)
438                 pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
439                        mmc_hostname(card->host), __func__, err);
440
441         pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
442                                              __func__);
443 out:
444         return err;
445 }
446
447 static int mmc_blk_ioctl_cmd(struct block_device *bdev,
448         struct mmc_ioc_cmd __user *ic_ptr)
449 {
450         struct mmc_blk_ioc_data *idata;
451         struct mmc_blk_data *md;
452         struct mmc_card *card;
453         struct mmc_command cmd = {0};
454         struct mmc_data data = {0};
455         struct mmc_request mrq = {NULL};
456         struct scatterlist sg;
457         int err;
458         int is_rpmb = false;
459         u32 status = 0;
460
461         /*
462          * The caller must have CAP_SYS_RAWIO, and must be calling this on the
463          * whole block device, not on a partition.  This prevents overspray
464          * between sibling partitions.
465          */
466         if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
467                 return -EPERM;
468
469         idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
470         if (IS_ERR(idata))
471                 return PTR_ERR(idata);
472
473         md = mmc_blk_get(bdev->bd_disk);
474         if (!md) {
475                 err = -EINVAL;
476                 goto cmd_err;
477         }
478
479         if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
480                 is_rpmb = true;
481
482         card = md->queue.card;
483         if (IS_ERR(card)) {
484                 err = PTR_ERR(card);
485                 goto cmd_done;
486         }
487
488         cmd.opcode = idata->ic.opcode;
489         cmd.arg = idata->ic.arg;
490         cmd.flags = idata->ic.flags;
491
492         if (idata->buf_bytes) {
493                 data.sg = &sg;
494                 data.sg_len = 1;
495                 data.blksz = idata->ic.blksz;
496                 data.blocks = idata->ic.blocks;
497
498                 sg_init_one(data.sg, idata->buf, idata->buf_bytes);
499
500                 if (idata->ic.write_flag)
501                         data.flags = MMC_DATA_WRITE;
502                 else
503                         data.flags = MMC_DATA_READ;
504
505                 /* data.flags must already be set before doing this. */
506                 mmc_set_data_timeout(&data, card);
507
508                 /* Allow overriding the timeout_ns for empirical tuning. */
509                 if (idata->ic.data_timeout_ns)
510                         data.timeout_ns = idata->ic.data_timeout_ns;
511
512                 if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
513                         /*
514                          * Pretend this is a data transfer and rely on the
515                          * host driver to compute timeout.  When all host
516                          * drivers support cmd.cmd_timeout for R1B, this
517                          * can be changed to:
518                          *
519                          *     mrq.data = NULL;
520                          *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
521                          */
522                         data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
523                 }
524
525                 mrq.data = &data;
526         }
527
528         mrq.cmd = &cmd;
529
530         mmc_get_card(card);
531
532         err = mmc_blk_part_switch(card, md);
533         if (err)
534                 goto cmd_rel_host;
535
536         if (idata->ic.is_acmd) {
537                 err = mmc_app_cmd(card->host, card);
538                 if (err)
539                         goto cmd_rel_host;
540         }
541
542         if (is_rpmb) {
543                 err = mmc_set_blockcount(card, data.blocks,
544                         idata->ic.write_flag & (1 << 31));
545                 if (err)
546                         goto cmd_rel_host;
547         }
548
549         if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
550             (cmd.opcode == MMC_SWITCH)) {
551                 err = ioctl_do_sanitize(card);
552
553                 if (err)
554                         pr_err("%s: ioctl_do_sanitize() failed. err = %d",
555                                __func__, err);
556
557                 goto cmd_rel_host;
558         }
559
560         mmc_wait_for_req(card->host, &mrq);
561
562         if (cmd.error) {
563                 dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
564                                                 __func__, cmd.error);
565                 err = cmd.error;
566                 goto cmd_rel_host;
567         }
568         if (data.error) {
569                 dev_err(mmc_dev(card->host), "%s: data error %d\n",
570                                                 __func__, data.error);
571                 err = data.error;
572                 goto cmd_rel_host;
573         }
574
575         /*
576          * According to the SD specs, some commands require a delay after
577          * issuing the command.
578          */
579         if (idata->ic.postsleep_min_us)
580                 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
581
582         if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
583                 err = -EFAULT;
584                 goto cmd_rel_host;
585         }
586
587         if (!idata->ic.write_flag) {
588                 if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
589                                                 idata->buf, idata->buf_bytes)) {
590                         err = -EFAULT;
591                         goto cmd_rel_host;
592                 }
593         }
594
595         if (is_rpmb) {
596                 /*
597                  * Ensure RPMB command has completed by polling CMD13
598                  * "Send Status".
599                  */
600                 err = ioctl_rpmb_card_status_poll(card, &status, 5);
601                 if (err)
602                         dev_err(mmc_dev(card->host),
603                                         "%s: Card Status=0x%08X, error %d\n",
604                                         __func__, status, err);
605         }
606
607 cmd_rel_host:
608         mmc_put_card(card);
609
610 cmd_done:
611         mmc_blk_put(md);
612 cmd_err:
613         kfree(idata->buf);
614         kfree(idata);
615         return err;
616 }
617
618 static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
619         unsigned int cmd, unsigned long arg)
620 {
621         int ret = -EINVAL;
622         if (cmd == MMC_IOC_CMD)
623                 ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
624         return ret;
625 }
626
627 #ifdef CONFIG_COMPAT
628 static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
629         unsigned int cmd, unsigned long arg)
630 {
631         return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
632 }
633 #endif
634
635 static const struct block_device_operations mmc_bdops = {
636         .open                   = mmc_blk_open,
637         .release                = mmc_blk_release,
638         .getgeo                 = mmc_blk_getgeo,
639         .owner                  = THIS_MODULE,
640         .ioctl                  = mmc_blk_ioctl,
641 #ifdef CONFIG_COMPAT
642         .compat_ioctl           = mmc_blk_compat_ioctl,
643 #endif
644 };
645
646 static inline int mmc_blk_part_switch(struct mmc_card *card,
647                                       struct mmc_blk_data *md)
648 {
649         int ret;
650         struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
651
652         if (main_md->part_curr == md->part_type)
653                 return 0;
654
655         if (mmc_card_mmc(card)) {
656                 u8 part_config = card->ext_csd.part_config;
657
658                 part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
659                 part_config |= md->part_type;
660
661                 ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
662                                  EXT_CSD_PART_CONFIG, part_config,
663                                  card->ext_csd.part_time);
664                 if (ret)
665                         return ret;
666
667                 card->ext_csd.part_config = part_config;
668         }
669
670         main_md->part_curr = md->part_type;
671         return 0;
672 }
673
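/*
 * Ask an SD card how many blocks of the previous write were successfully
 * programmed (ACMD22, SEND_NUM_WR_BLKS).  Returns (u32)-1 on any error.
 */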
674 static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
675 {
676         int err;
677         u32 result;
678         __be32 *blocks;
679
680         struct mmc_request mrq = {NULL};
681         struct mmc_command cmd = {0};
682         struct mmc_data data = {0};
683
684         struct scatterlist sg;
685
686         cmd.opcode = MMC_APP_CMD;
687         cmd.arg = card->rca << 16;
688         cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
689
690         err = mmc_wait_for_cmd(card->host, &cmd, 0);
691         if (err)
692                 return (u32)-1;
693         if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
694                 return (u32)-1;
695
696         memset(&cmd, 0, sizeof(struct mmc_command));
697
698         cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
699         cmd.arg = 0;
700         cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
701
702         data.blksz = 4;
703         data.blocks = 1;
704         data.flags = MMC_DATA_READ;
705         data.sg = &sg;
706         data.sg_len = 1;
707         mmc_set_data_timeout(&data, card);
708
709         mrq.cmd = &cmd;
710         mrq.data = &data;
711
712         blocks = kmalloc(4, GFP_KERNEL);
713         if (!blocks)
714                 return (u32)-1;
715
716         sg_init_one(&sg, blocks, 4);
717
718         mmc_wait_for_req(card->host, &mrq);
719
720         result = ntohl(*blocks);
721         kfree(blocks);
722
723         if (cmd.error || data.error)
724                 result = (u32)-1;
725
726         return result;
727 }
728
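/* Issue CMD13 (SEND_STATUS) and return the card's R1 status in *status. */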
729 static int get_card_status(struct mmc_card *card, u32 *status, int retries)
730 {
731         struct mmc_command cmd = {0};
732         int err;
733
734         cmd.opcode = MMC_SEND_STATUS;
735         if (!mmc_host_is_spi(card->host))
736                 cmd.arg = card->rca << 16;
737         cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
738         err = mmc_wait_for_cmd(card->host, &cmd, retries);
739         if (err == 0)
740                 *status = cmd.resp[0];
741         return err;
742 }
743
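/*
 * Poll the card until it reports READY_FOR_DATA and has left the
 * programming state, or until @timeout_ms expires.  When the host
 * supports HW busy detection and @hw_busy_detect is set, a single
 * status check is enough.
 */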
744 static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
745                 bool hw_busy_detect, struct request *req, int *gen_err)
746 {
747         unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
748         int err = 0;
749         u32 status;
750
751         do {
752                 err = get_card_status(card, &status, 5);
753                 if (err) {
754                         pr_err("%s: error %d requesting status\n",
755                                req->rq_disk->disk_name, err);
756                         return err;
757                 }
758
759                 if (status & R1_ERROR) {
760                         pr_err("%s: %s: error sending status cmd, status %#x\n",
761                                 req->rq_disk->disk_name, __func__, status);
762                         *gen_err = 1;
763                 }
764
765                 /* We may rely on the host hw to handle busy detection. */
766                 if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) &&
767                         hw_busy_detect)
768                         break;
769
770                 /*
771                  * Timeout if the device never becomes ready for data and never
772                  * leaves the program state.
773                  */
774                 if (time_after(jiffies, timeout)) {
775                         pr_err("%s: Card stuck in programming state! %s %s\n",
776                                 mmc_hostname(card->host),
777                                 req->rq_disk->disk_name, __func__);
778                         return -ETIMEDOUT;
779                 }
780
781                 /*
782                  * Some cards mishandle the status bits,
783                  * so make sure to check both the busy
784                  * indication and the card state.
785                  */
786         } while (!(status & R1_READY_FOR_DATA) ||
787                  (R1_CURRENT_STATE(status) == R1_STATE_PRG));
788
789         return err;
790 }
791
792 static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
793                 struct request *req, int *gen_err, u32 *stop_status)
794 {
795         struct mmc_host *host = card->host;
796         struct mmc_command cmd = {0};
797         int err;
798         bool use_r1b_resp = rq_data_dir(req) == WRITE;
799
800         /*
801          * Normally we use R1B responses for WRITE, but in cases where the host
802          * has specified a max_busy_timeout we need to validate it. A failure
803          * means we need to prevent the host from doing hw busy detection, which
804          * is done by converting to an R1 response instead.
805          */
806         if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
807                 use_r1b_resp = false;
808
809         cmd.opcode = MMC_STOP_TRANSMISSION;
810         if (use_r1b_resp) {
811                 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
812                 cmd.busy_timeout = timeout_ms;
813         } else {
814                 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
815         }
816
817         err = mmc_wait_for_cmd(host, &cmd, 5);
818         if (err)
819                 return err;
820
821         *stop_status = cmd.resp[0];
822
823         /* No need to check card status in case of READ. */
824         if (rq_data_dir(req) == READ)
825                 return 0;
826
827         if (!mmc_host_is_spi(host) &&
828                 (*stop_status & R1_ERROR)) {
829                 pr_err("%s: %s: general error sending stop command, resp %#x\n",
830                         req->rq_disk->disk_name, __func__, *stop_status);
831                 *gen_err = 1;
832         }
833
834         return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
835 }
836
837 #define ERR_NOMEDIUM    3
838 #define ERR_RETRY       2
839 #define ERR_ABORT       1
840 #define ERR_CONTINUE    0
841
842 static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
843         bool status_valid, u32 status)
844 {
845         switch (error) {
846         case -EILSEQ:
847                 /* response crc error, retry the r/w cmd */
848                 pr_err("%s: %s sending %s command, card status %#x\n",
849                         req->rq_disk->disk_name, "response CRC error",
850                         name, status);
851                 return ERR_RETRY;
852
853         case -ETIMEDOUT:
854                 pr_err("%s: %s sending %s command, card status %#x\n",
855                         req->rq_disk->disk_name, "timed out", name, status);
856
857                 /* If the status cmd initially failed, retry the r/w cmd */
858                 if (!status_valid)
859                         return ERR_RETRY;
860
861                 /*
862                  * If it was an r/w cmd CRC error, or an illegal command
863                  * (e.g., issued in the wrong state), then retry - we should
864                  * have corrected the state problem above.
865                  */
866                 if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
867                         return ERR_RETRY;
868
869                 /* Otherwise abort the command */
870                 return ERR_ABORT;
871
872         default:
873                 /* We don't understand the error code the driver gave us */
874                 pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
875                        req->rq_disk->disk_name, error, status);
876                 return ERR_ABORT;
877         }
878 }
879
880 /*
881  * Initial r/w and stop cmd error recovery.
882  * We don't know whether the card received the r/w cmd or not, so try to
883  * restore things back to a sane state.  Essentially, we do this as follows:
884  * - Obtain card status.  If the first attempt to obtain card status fails,
885  *   the status word will reflect the failed status cmd, not the failed
886  *   r/w cmd.  If we fail to obtain card status, it suggests we can no
887  *   longer communicate with the card.
888  * - Check the card state.  If the card received the cmd but there was a
889  *   transient problem with the response, it might still be in a data transfer
890  *   mode.  Try to send it a stop command.  If this fails, we can't recover.
891  * - If the r/w cmd failed due to a response CRC error, it was probably
892  *   transient, so retry the cmd.
893  * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
894  * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
895  *   illegal cmd, retry.
896  * Otherwise we don't understand what happened, so abort.
897  */
898 static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
899         struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
900 {
901         bool prev_cmd_status_valid = true;
902         u32 status, stop_status = 0;
903         int err, retry;
904
905         if (mmc_card_removed(card))
906                 return ERR_NOMEDIUM;
907
908         /*
909          * Try to get card status which indicates both the card state
910          * and why there was no response.  If the first attempt fails,
911          * we can't be sure the returned status is for the r/w command.
912          */
913         for (retry = 2; retry >= 0; retry--) {
914                 err = get_card_status(card, &status, 0);
915                 if (!err)
916                         break;
917
918                 /* Re-tune if needed */
919                 mmc_retune_recheck(card->host);
920
921                 prev_cmd_status_valid = false;
922                 pr_err("%s: error %d sending status command, %sing\n",
923                        req->rq_disk->disk_name, err, retry ? "retry" : "abort");
924         }
925
926         /* We couldn't get a response from the card.  Give up. */
927         if (err) {
928                 /* Check if the card is removed */
929                 if (mmc_detect_card_removed(card->host))
930                         return ERR_NOMEDIUM;
931                 return ERR_ABORT;
932         }
933
934         /* Flag ECC errors */
935         if ((status & R1_CARD_ECC_FAILED) ||
936             (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
937             (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
938                 *ecc_err = 1;
939
940         /* Flag General errors */
941         if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
942                 if ((status & R1_ERROR) ||
943                         (brq->stop.resp[0] & R1_ERROR)) {
944                         pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
945                                req->rq_disk->disk_name, __func__,
946                                brq->stop.resp[0], status);
947                         *gen_err = 1;
948                 }
949
950         /*
951          * Check the current card state.  If it is in some data transfer
952          * mode, tell it to stop (and hopefully transition back to TRAN.)
953          */
954         if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
955             R1_CURRENT_STATE(status) == R1_STATE_RCV) {
956                 err = send_stop(card,
957                         DIV_ROUND_UP(brq->data.timeout_ns, 1000000),
958                         req, gen_err, &stop_status);
959                 if (err) {
960                         pr_err("%s: error %d sending stop command\n",
961                                req->rq_disk->disk_name, err);
962                         /*
963                          * If the stop cmd also timed out, the card is probably
964                          * not present, so abort. Other errors are bad news too.
965                          */
966                         return ERR_ABORT;
967                 }
968
969                 if (stop_status & R1_CARD_ECC_FAILED)
970                         *ecc_err = 1;
971         }
972
973         /* Check for set block count errors */
974         if (brq->sbc.error)
975                 return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
976                                 prev_cmd_status_valid, status);
977
978         /* Check for r/w command errors */
979         if (brq->cmd.error)
980                 return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
981                                 prev_cmd_status_valid, status);
982
983         /* Data errors */
984         if (!brq->stop.error)
985                 return ERR_CONTINUE;
986
987         /* Now for stop errors.  These aren't fatal to the transfer. */
988         pr_info("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
989                req->rq_disk->disk_name, brq->stop.error,
990                brq->cmd.resp[0], status);
991
992         /*
993          * Substitute in our own stop status as this will give the error
994          * state which happened during the execution of the r/w command.
995          */
996         if (stop_status) {
997                 brq->stop.resp[0] = stop_status;
998                 brq->stop.error = 0;
999         }
1000         return ERR_CONTINUE;
1001 }
1002
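/*
 * Reset the card after a fatal error.  Only one reset is attempted per
 * request type (read, write, discard, secure discard) until
 * mmc_blk_reset_success() clears the flag; after the reset the partition
 * the request was targeting is re-selected.
 */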
1003 static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
1004                          int type)
1005 {
1006         int err;
1007
1008         if (md->reset_done & type)
1009                 return -EEXIST;
1010
1011         md->reset_done |= type;
1012         err = mmc_hw_reset(host);
1013         /* Ensure we switch back to the correct partition */
1014         if (err != -EOPNOTSUPP) {
1015                 struct mmc_blk_data *main_md =
1016                         dev_get_drvdata(&host->card->dev);
1017                 int part_err;
1018
1019                 main_md->part_curr = main_md->part_type;
1020                 part_err = mmc_blk_part_switch(host->card, md);
1021                 if (part_err) {
1022                         /*
1023                          * We have failed to get back into the correct
1024                          * partition, so we need to abort the whole request.
1025                          */
1026                         return -ENODEV;
1027                 }
1028         }
1029         return err;
1030 }
1031
1032 static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
1033 {
1034         md->reset_done &= ~type;
1035 }
1036
1037 int mmc_access_rpmb(struct mmc_queue *mq)
1038 {
1039         struct mmc_blk_data *md = mq->data;
1040         /*
1041          * If this is an RPMB partition access, return true.
1042          */
1043         if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
1044                 return true;
1045
1046         return false;
1047 }
1048
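/*
 * Handle a discard request with the best erase variant the card supports
 * (DISCARD, TRIM or ERASE), retrying once after a card reset on -EIO.
 */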
1049 static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
1050 {
1051         struct mmc_blk_data *md = mq->data;
1052         struct mmc_card *card = md->queue.card;
1053         unsigned int from, nr, arg;
1054         int err = 0, type = MMC_BLK_DISCARD;
1055
1056         if (!mmc_can_erase(card)) {
1057                 err = -EOPNOTSUPP;
1058                 goto out;
1059         }
1060
1061         from = blk_rq_pos(req);
1062         nr = blk_rq_sectors(req);
1063
1064         if (mmc_can_discard(card))
1065                 arg = MMC_DISCARD_ARG;
1066         else if (mmc_can_trim(card))
1067                 arg = MMC_TRIM_ARG;
1068         else
1069                 arg = MMC_ERASE_ARG;
1070 retry:
1071         if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1072                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1073                                  INAND_CMD38_ARG_EXT_CSD,
1074                                  arg == MMC_TRIM_ARG ?
1075                                  INAND_CMD38_ARG_TRIM :
1076                                  INAND_CMD38_ARG_ERASE,
1077                                  0);
1078                 if (err)
1079                         goto out;
1080         }
1081         err = mmc_erase(card, from, nr, arg);
1082 out:
1083         if (err == -EIO && !mmc_blk_reset(md, card->host, type))
1084                 goto retry;
1085         if (!err)
1086                 mmc_blk_reset_success(md, type);
1087         blk_end_request(req, err, blk_rq_bytes(req));
1088
1089         return err ? 0 : 1;
1090 }
1091
1092 static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
1093                                        struct request *req)
1094 {
1095         struct mmc_blk_data *md = mq->data;
1096         struct mmc_card *card = md->queue.card;
1097         unsigned int from, nr, arg;
1098         int err = 0, type = MMC_BLK_SECDISCARD;
1099
1100         if (!(mmc_can_secure_erase_trim(card))) {
1101                 err = -EOPNOTSUPP;
1102                 goto out;
1103         }
1104
1105         from = blk_rq_pos(req);
1106         nr = blk_rq_sectors(req);
1107
1108         if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
1109                 arg = MMC_SECURE_TRIM1_ARG;
1110         else
1111                 arg = MMC_SECURE_ERASE_ARG;
1112
1113 retry:
1114         if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1115                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1116                                  INAND_CMD38_ARG_EXT_CSD,
1117                                  arg == MMC_SECURE_TRIM1_ARG ?
1118                                  INAND_CMD38_ARG_SECTRIM1 :
1119                                  INAND_CMD38_ARG_SECERASE,
1120                                  0);
1121                 if (err)
1122                         goto out_retry;
1123         }
1124
1125         err = mmc_erase(card, from, nr, arg);
1126         if (err == -EIO)
1127                 goto out_retry;
1128         if (err)
1129                 goto out;
1130
1131         if (arg == MMC_SECURE_TRIM1_ARG) {
1132                 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1133                         err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1134                                          INAND_CMD38_ARG_EXT_CSD,
1135                                          INAND_CMD38_ARG_SECTRIM2,
1136                                          0);
1137                         if (err)
1138                                 goto out_retry;
1139                 }
1140
1141                 err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
1142                 if (err == -EIO)
1143                         goto out_retry;
1144                 if (err)
1145                         goto out;
1146         }
1147
1148 out_retry:
1149         if (err && !mmc_blk_reset(md, card->host, type))
1150                 goto retry;
1151         if (!err)
1152                 mmc_blk_reset_success(md, type);
1153 out:
1154         blk_end_request(req, err, blk_rq_bytes(req));
1155
1156         return err ? 0 : 1;
1157 }
1158
1159 static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
1160 {
1161         struct mmc_blk_data *md = mq->data;
1162         struct mmc_card *card = md->queue.card;
1163         int ret = 0;
1164
1165         ret = mmc_flush_cache(card);
1166         if (ret)
1167                 ret = -EIO;
1168
1169         blk_end_request_all(req, ret);
1170
1171         return ret ? 0 : 1;
1172 }
1173
1174 /*
1175  * Reformat the current write as a reliable write, supporting
1176  * both legacy and enhanced reliable-write MMC cards.
1177  * In each transfer we'll handle only as much as a single
1178  * reliable write can handle, thus finishing the request in
1179  * partial completions.
1180  */
1181 static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
1182                                     struct mmc_card *card,
1183                                     struct request *req)
1184 {
1185         if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
1186                 /* Legacy mode imposes restrictions on transfers. */
1187                 if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
1188                         brq->data.blocks = 1;
1189
1190                 if (brq->data.blocks > card->ext_csd.rel_sectors)
1191                         brq->data.blocks = card->ext_csd.rel_sectors;
1192                 else if (brq->data.blocks < card->ext_csd.rel_sectors)
1193                         brq->data.blocks = 1;
1194         }
1195 }
1196
1197 #define CMD_ERRORS                                                      \
1198         (R1_OUT_OF_RANGE |      /* Command argument out of range */     \
1199          R1_ADDRESS_ERROR |     /* Misaligned address */                \
1200          R1_BLOCK_LEN_ERROR |   /* Transferred block length incorrect */\
1201          R1_WP_VIOLATION |      /* Tried to write to protected block */ \
1202          R1_CC_ERROR |          /* Card controller error */             \
1203          R1_ERROR)              /* General/unknown error */
1204
1205 static int mmc_blk_err_check(struct mmc_card *card,
1206                              struct mmc_async_req *areq)
1207 {
1208         struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
1209                                                     mmc_active);
1210         struct mmc_blk_request *brq = &mq_mrq->brq;
1211         struct request *req = mq_mrq->req;
1212         int need_retune = card->host->need_retune;
1213         int ecc_err = 0, gen_err = 0;
1214
1215         /*
1216          * sbc.error indicates a problem with the set block count
1217          * command.  No data will have been transferred.
1218          *
1219          * cmd.error indicates a problem with the r/w command.  No
1220          * data will have been transferred.
1221          *
1222          * stop.error indicates a problem with the stop command.  Data
1223          * may have been transferred, or may still be transferring.
1224          */
1225         if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
1226             brq->data.error) {
1227                 switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
1228                 case ERR_RETRY:
1229                         return MMC_BLK_RETRY;
1230                 case ERR_ABORT:
1231                         return MMC_BLK_ABORT;
1232                 case ERR_NOMEDIUM:
1233                         return MMC_BLK_NOMEDIUM;
1234                 case ERR_CONTINUE:
1235                         break;
1236                 }
1237         }
1238
1239         /*
1240          * Check for errors relating to the execution of the
1241          * initial command - such as address errors.  No data
1242          * has been transferred.
1243          */
1244         if (brq->cmd.resp[0] & CMD_ERRORS) {
1245                 pr_err("%s: r/w command failed, status = %#x\n",
1246                        req->rq_disk->disk_name, brq->cmd.resp[0]);
1247                 return MMC_BLK_ABORT;
1248         }
1249
1250         /*
1251          * Everything else is either success, or a data error of some
1252          * kind.  If it was a write, we may have transitioned to
1253          * program mode, which we have to wait for to complete.
1254          */
1255         if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
1256                 int err;
1257
1258                 /* Check stop command response */
1259                 if (brq->stop.resp[0] & R1_ERROR) {
1260                         pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
1261                                req->rq_disk->disk_name, __func__,
1262                                brq->stop.resp[0]);
1263                         gen_err = 1;
1264                 }
1265
1266                 err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
1267                                         &gen_err);
1268                 if (err)
1269                         return MMC_BLK_CMD_ERR;
1270         }
1271
1272         /* If a general error occurs, retry the write operation. */
1273         if (gen_err) {
1274                 pr_warn("%s: retrying write for general error\n",
1275                                 req->rq_disk->disk_name);
1276                 return MMC_BLK_RETRY;
1277         }
1278
1279         if (brq->data.error) {
1280                 if (need_retune && !brq->retune_retry_done) {
1281                         pr_info("%s: retrying because a re-tune was needed\n",
1282                                 req->rq_disk->disk_name);
1283                         brq->retune_retry_done = 1;
1284                         return MMC_BLK_RETRY;
1285                 }
1286                 pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
1287                        req->rq_disk->disk_name, brq->data.error,
1288                        (unsigned)blk_rq_pos(req),
1289                        (unsigned)blk_rq_sectors(req),
1290                        brq->cmd.resp[0], brq->stop.resp[0]);
1291
1292                 if (rq_data_dir(req) == READ) {
1293                         if (ecc_err)
1294                                 return MMC_BLK_ECC_ERR;
1295                         return MMC_BLK_DATA_ERR;
1296                 } else {
1297                         return MMC_BLK_CMD_ERR;
1298                 }
1299         }
1300
1301         if (!brq->data.bytes_xfered)
1302                 return MMC_BLK_RETRY;
1303
1304         if (mmc_packed_cmd(mq_mrq->cmd_type)) {
1305                 if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
1306                         return MMC_BLK_PARTIAL;
1307                 else
1308                         return MMC_BLK_SUCCESS;
1309         }
1310
1311         if (blk_rq_bytes(req) != brq->data.bytes_xfered)
1312                 return MMC_BLK_PARTIAL;
1313
1314         return MMC_BLK_SUCCESS;
1315 }
1316
1317 static int mmc_blk_packed_err_check(struct mmc_card *card,
1318                                     struct mmc_async_req *areq)
1319 {
1320         struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
1321                         mmc_active);
1322         struct request *req = mq_rq->req;
1323         struct mmc_packed *packed = mq_rq->packed;
1324         int err, check, status;
1325         u8 *ext_csd;
1326
1327         BUG_ON(!packed);
1328
1329         packed->retries--;
1330         check = mmc_blk_err_check(card, areq);
1331         err = get_card_status(card, &status, 0);
1332         if (err) {
1333                 pr_err("%s: error %d sending status command\n",
1334                        req->rq_disk->disk_name, err);
1335                 return MMC_BLK_ABORT;
1336         }
1337
1338         if (status & R1_EXCEPTION_EVENT) {
1339                 err = mmc_get_ext_csd(card, &ext_csd);
1340                 if (err) {
1341                         pr_err("%s: error %d sending ext_csd\n",
1342                                req->rq_disk->disk_name, err);
1343                         return MMC_BLK_ABORT;
1344                 }
1345
1346                 if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
1347                      EXT_CSD_PACKED_FAILURE) &&
1348                     (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
1349                      EXT_CSD_PACKED_GENERIC_ERROR)) {
1350                         if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
1351                             EXT_CSD_PACKED_INDEXED_ERROR) {
1352                                 packed->idx_failure =
1353                                   ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
1354                                 check = MMC_BLK_PARTIAL;
1355                         }
1356                         pr_err("%s: packed cmd failed, nr %u, sectors %u, "
1357                                "failure index: %d\n",
1358                                req->rq_disk->disk_name, packed->nr_entries,
1359                                packed->blocks, packed->idx_failure);
1360                 }
1361                 kfree(ext_csd);
1362         }
1363
1364         return check;
1365 }
1366
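/*
 * Translate a block layer read/write request into an MMC block request:
 * choose single- vs. multi-block commands, apply reliable-write and
 * data-tag rules, set up CMD23 (SET_BLOCK_COUNT) when supported, and map
 * the scatterlist to the request.
 */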
1367 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
1368                                struct mmc_card *card,
1369                                int disable_multi,
1370                                struct mmc_queue *mq)
1371 {
1372         u32 readcmd, writecmd;
1373         struct mmc_blk_request *brq = &mqrq->brq;
1374         struct request *req = mqrq->req;
1375         struct mmc_blk_data *md = mq->data;
1376         bool do_data_tag;
1377
1378         /*
1379          * Reliable writes are used to implement Forced Unit Access and
1380          * REQ_META accesses, and are supported only on MMCs.
1381          *
1382          * XXX: this really needs a good explanation of why REQ_META
1383          * is treated specially.
1384          */
1385         bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
1386                           (req->cmd_flags & REQ_META)) &&
1387                 (rq_data_dir(req) == WRITE) &&
1388                 (md->flags & MMC_BLK_REL_WR);
1389
1390         memset(brq, 0, sizeof(struct mmc_blk_request));
1391         brq->mrq.cmd = &brq->cmd;
1392         brq->mrq.data = &brq->data;
1393
1394         brq->cmd.arg = blk_rq_pos(req);
1395         if (!mmc_card_blockaddr(card))
1396                 brq->cmd.arg <<= 9;
1397         brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
1398         brq->data.blksz = 512;
1399         brq->stop.opcode = MMC_STOP_TRANSMISSION;
1400         brq->stop.arg = 0;
1401         brq->data.blocks = blk_rq_sectors(req);
1402
1403         /*
1404          * The block layer doesn't support all sector count
1405          * restrictions, so we need to be prepared for requests
1406          * that are too big.
1407          */
1408         if (brq->data.blocks > card->host->max_blk_count)
1409                 brq->data.blocks = card->host->max_blk_count;
1410
1411         if (brq->data.blocks > 1) {
1412                 /*
1413                  * After a read error, we redo the request one sector
1414                  * at a time in order to accurately determine which
1415                  * sectors can be read successfully.
1416                  */
1417                 if (disable_multi)
1418                         brq->data.blocks = 1;
1419
1420                 /*
1421                  * Some controllers have HW issues while operating
1422                  * in multiple I/O mode
1423                  */
1424                 if (card->host->ops->multi_io_quirk)
1425                         brq->data.blocks = card->host->ops->multi_io_quirk(card,
1426                                                 (rq_data_dir(req) == READ) ?
1427                                                 MMC_DATA_READ : MMC_DATA_WRITE,
1428                                                 brq->data.blocks);
1429         }
1430
1431         if (brq->data.blocks > 1 || do_rel_wr) {
1432                 /* SPI multiblock writes terminate using a special
1433                  * token, not a STOP_TRANSMISSION request.
1434                  */
1435                 if (!mmc_host_is_spi(card->host) ||
1436                     rq_data_dir(req) == READ)
1437                         brq->mrq.stop = &brq->stop;
1438                 readcmd = MMC_READ_MULTIPLE_BLOCK;
1439                 writecmd = MMC_WRITE_MULTIPLE_BLOCK;
1440         } else {
1441                 brq->mrq.stop = NULL;
1442                 readcmd = MMC_READ_SINGLE_BLOCK;
1443                 writecmd = MMC_WRITE_BLOCK;
1444         }
1445         if (rq_data_dir(req) == READ) {
1446                 brq->cmd.opcode = readcmd;
1447                 brq->data.flags |= MMC_DATA_READ;
1448                 if (brq->mrq.stop)
1449                         brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 |
1450                                         MMC_CMD_AC;
1451         } else {
1452                 brq->cmd.opcode = writecmd;
1453                 brq->data.flags |= MMC_DATA_WRITE;
1454                 if (brq->mrq.stop)
1455                         brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B |
1456                                         MMC_CMD_AC;
1457         }
1458
1459         if (do_rel_wr)
1460                 mmc_apply_rel_rw(brq, card, req);
1461
1462         /*
1463          * The data tag is used only when writing metadata, to speed
1464          * up the write and any subsequent reads of that metadata.
1465          */
1466         do_data_tag = (card->ext_csd.data_tag_unit_size) &&
1467                 (req->cmd_flags & REQ_META) &&
1468                 (rq_data_dir(req) == WRITE) &&
1469                 ((brq->data.blocks * brq->data.blksz) >=
1470                  card->ext_csd.data_tag_unit_size);
1471
1472         /*
1473          * Pre-defined multi-block transfers are preferable to
1474          * open-ended ones (and necessary for reliable writes).
1475          * However, it is not sufficient to just send CMD23,
1476          * and avoid the final CMD12, as on an error condition
1477          * CMD12 (stop) needs to be sent anyway. This, coupled
1478          * with Auto-CMD23 enhancements provided by some
1479          * hosts, means that the complexity of dealing
1480          * with this is best left to the host. If CMD23 is
1481          * supported by card and host, we'll fill sbc in and let
1482          * the host deal with handling it correctly. This means
1483          * that for hosts that don't expose MMC_CAP_CMD23, no
1484          * change of behavior will be observed.
1485          *
1486          * N.B.: Some MMC cards experience performance degradation.
1487          * We'll avoid using CMD23-bounded multiblock writes for
1488          * these, while retaining features like reliable writes.
1489          */
1490         if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
1491             (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
1492              do_data_tag)) {
1493                 brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
1494                 brq->sbc.arg = brq->data.blocks |
1495                         (do_rel_wr ? (1 << 31) : 0) |
1496                         (do_data_tag ? (1 << 29) : 0);
1497                 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
1498                 brq->mrq.sbc = &brq->sbc;
1499         }
1500
1501         mmc_set_data_timeout(&brq->data, card);
1502
1503         brq->data.sg = mqrq->sg;
1504         brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
1505
1506         /*
1507          * Adjust the sg list so it is the same size as the
1508          * request.
1509          */
1510         if (brq->data.blocks != blk_rq_sectors(req)) {
1511                 int i, data_size = brq->data.blocks << 9;
1512                 struct scatterlist *sg;
1513
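                /*
                 * The transfer was shortened (e.g. dropped to a single block
                 * or limited by a host quirk), so walk the sg list and cut it
                 * off once it covers data_size bytes.
                 */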
1514                 for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
1515                         data_size -= sg->length;
1516                         if (data_size <= 0) {
1517                                 sg->length += data_size;
1518                                 i++;
1519                                 break;
1520                         }
1521                 }
1522                 brq->data.sg_len = i;
1523         }
1524
1525         mqrq->mmc_active.mrq = &brq->mrq;
1526         mqrq->mmc_active.err_check = mmc_blk_err_check;
1527
1528         mmc_queue_bounce_pre(mqrq);
1529 }
1530
1531 static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
1532                                           struct mmc_card *card)
1533 {
1534         unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
1535         unsigned int max_seg_sz = queue_max_segment_size(q);
1536         unsigned int len, nr_segs = 0;
1537
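        /*
         * Work out how many scatterlist segments the packed command header
         * (one native sector: 512 bytes, or 4KB on large-sector cards)
         * occupies, given the host's maximum segment size.
         */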
1538         do {
1539                 len = min(hdr_sz, max_seg_sz);
1540                 hdr_sz -= len;
1541                 nr_segs++;
1542         } while (hdr_sz);
1543
1544         return nr_segs;
1545 }
1546
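/*
 * Try to gather follow-up requests from the queue into a packed write
 * together with @req.  Returns the total number of entries collected
 * (including @req itself), or 0 if the request must be issued on its own.
 */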
1547 static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
1548 {
1549         struct request_queue *q = mq->queue;
1550         struct mmc_card *card = mq->card;
1551         struct request *cur = req, *next = NULL;
1552         struct mmc_blk_data *md = mq->data;
1553         struct mmc_queue_req *mqrq = mq->mqrq_cur;
1554         bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
1555         unsigned int req_sectors = 0, phys_segments = 0;
1556         unsigned int max_blk_count, max_phys_segs;
1557         bool put_back = true;
1558         u8 max_packed_rw = 0;
1559         u8 reqs = 0;
1560
1561         if (!(md->flags & MMC_BLK_PACKED_CMD))
1562                 goto no_packed;
1563
1564         if ((rq_data_dir(cur) == WRITE) &&
1565             mmc_host_packed_wr(card->host))
1566                 max_packed_rw = card->ext_csd.max_packed_writes;
1567
1568         if (max_packed_rw == 0)
1569                 goto no_packed;
1570
1571         if (mmc_req_rel_wr(cur) &&
1572             (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
1573                 goto no_packed;
1574
1575         if (mmc_large_sector(card) &&
1576             !IS_ALIGNED(blk_rq_sectors(cur), 8))
1577                 goto no_packed;
1578
1579         mmc_blk_clear_packed(mqrq);
1580
1581         max_blk_count = min(card->host->max_blk_count,
1582                             card->host->max_req_size >> 9);
1583         if (unlikely(max_blk_count > 0xffff))
1584                 max_blk_count = 0xffff;
1585
1586         max_phys_segs = queue_max_segments(q);
1587         req_sectors += blk_rq_sectors(cur);
1588         phys_segments += cur->nr_phys_segments;
1589
1590         if (rq_data_dir(cur) == WRITE) {
1591                 req_sectors += mmc_large_sector(card) ? 8 : 1;
1592                 phys_segments += mmc_calc_packed_hdr_segs(q, card);
1593         }
1594
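        /*
         * Keep fetching requests from the queue and adding them to the packed
         * list until a limit is hit or an incompatible request (different
         * direction, discard/flush, misaligned, ...) shows up; the last
         * fetched but unused request is put back on the queue below.
         */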
1595         do {
1596                 if (reqs >= max_packed_rw - 1) {
1597                         put_back = false;
1598                         break;
1599                 }
1600
1601                 spin_lock_irq(q->queue_lock);
1602                 next = blk_fetch_request(q);
1603                 spin_unlock_irq(q->queue_lock);
1604                 if (!next) {
1605                         put_back = false;
1606                         break;
1607                 }
1608
1609                 if (mmc_large_sector(card) &&
1610                     !IS_ALIGNED(blk_rq_sectors(next), 8))
1611                         break;
1612
1613                 if (next->cmd_flags & REQ_DISCARD ||
1614                     next->cmd_flags & REQ_FLUSH)
1615                         break;
1616
1617                 if (rq_data_dir(cur) != rq_data_dir(next))
1618                         break;
1619
1620                 if (mmc_req_rel_wr(next) &&
1621                     (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
1622                         break;
1623
1624                 req_sectors += blk_rq_sectors(next);
1625                 if (req_sectors > max_blk_count)
1626                         break;
1627
1628                 phys_segments += next->nr_phys_segments;
1629                 if (phys_segments > max_phys_segs)
1630                         break;
1631
1632                 list_add_tail(&next->queuelist, &mqrq->packed->list);
1633                 cur = next;
1634                 reqs++;
1635         } while (1);
1636
1637         if (put_back) {
1638                 spin_lock_irq(q->queue_lock);
1639                 blk_requeue_request(q, next);
1640                 spin_unlock_irq(q->queue_lock);
1641         }
1642
1643         if (reqs > 0) {
1644                 list_add(&req->queuelist, &mqrq->packed->list);
1645                 mqrq->packed->nr_entries = ++reqs;
1646                 mqrq->packed->retries = reqs;
1647                 return reqs;
1648         }
1649
1650 no_packed:
1651         mqrq->cmd_type = MMC_PACKED_NONE;
1652         return 0;
1653 }
1654
1655 static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
1656                                         struct mmc_card *card,
1657                                         struct mmc_queue *mq)
1658 {
1659         struct mmc_blk_request *brq = &mqrq->brq;
1660         struct request *req = mqrq->req;
1661         struct request *prq;
1662         struct mmc_blk_data *md = mq->data;
1663         struct mmc_packed *packed = mqrq->packed;
1664         bool do_rel_wr, do_data_tag;
1665         u32 *packed_cmd_hdr;
1666         u8 hdr_blocks;
1667         u8 i = 1;
1668
1669         BUG_ON(!packed);
1670
1671         mqrq->cmd_type = MMC_PACKED_WRITE;
1672         packed->blocks = 0;
1673         packed->idx_failure = MMC_PACKED_NR_IDX;
1674
1675         packed_cmd_hdr = packed->cmd_hdr;
1676         memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
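        /*
         * Packed command header, word 0: the number of entries, the transfer
         * direction (write) and the packed command version.
         */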
1677         packed_cmd_hdr[0] = (packed->nr_entries << 16) |
1678                 (PACKED_CMD_WR << 8) | PACKED_CMD_VER;
1679         hdr_blocks = mmc_large_sector(card) ? 8 : 1;
1680
1681         /*
1682          * Arguments for each entry of the packed group
1683          */
1684         list_for_each_entry(prq, &packed->list, queuelist) {
1685                 do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
1686                 do_data_tag = (card->ext_csd.data_tag_unit_size) &&
1687                         (prq->cmd_flags & REQ_META) &&
1688                         (rq_data_dir(prq) == WRITE) &&
1689                         ((brq->data.blocks * brq->data.blksz) >=
1690                          card->ext_csd.data_tag_unit_size);
1691                 /* Argument of CMD23 */
1692                 packed_cmd_hdr[(i * 2)] =
1693                         (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
1694                         (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
1695                         blk_rq_sectors(prq);
1696                 /* Argument of CMD18 or CMD25 */
1697                 packed_cmd_hdr[(i * 2) + 1] =
1698                         mmc_card_blockaddr(card) ?
1699                         blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
1700                 packed->blocks += blk_rq_sectors(prq);
1701                 i++;
1702         }
1703
1704         memset(brq, 0, sizeof(struct mmc_blk_request));
1705         brq->mrq.cmd = &brq->cmd;
1706         brq->mrq.data = &brq->data;
1707         brq->mrq.sbc = &brq->sbc;
1708         brq->mrq.stop = &brq->stop;
1709
1710         brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
1711         brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
1712         brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
1713
1714         brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
1715         brq->cmd.arg = blk_rq_pos(req);
1716         if (!mmc_card_blockaddr(card))
1717                 brq->cmd.arg <<= 9;
1718         brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
1719
1720         brq->data.blksz = 512;
1721         brq->data.blocks = packed->blocks + hdr_blocks;
1722         brq->data.flags |= MMC_DATA_WRITE;
1723
1724         brq->stop.opcode = MMC_STOP_TRANSMISSION;
1725         brq->stop.arg = 0;
1726         brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1727
1728         mmc_set_data_timeout(&brq->data, card);
1729
1730         brq->data.sg = mqrq->sg;
1731         brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
1732
1733         mqrq->mmc_active.mrq = &brq->mrq;
1734         mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
1735
1736         mmc_queue_bounce_pre(mqrq);
1737 }
1738
1739 static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
1740                            struct mmc_blk_request *brq, struct request *req,
1741                            int ret)
1742 {
1743         struct mmc_queue_req *mq_rq;
1744         mq_rq = container_of(brq, struct mmc_queue_req, brq);
1745
1746         /*
1747          * If this is an SD card and we're writing, we can first
1748          * mark the known good sectors as ok.
1749          *
1750          * If the card is not SD, we can still acknowledge the written sectors
1751          * as reported by the controller (which might be less than
1752          * the real number of written sectors, but never more).
1753          */
1754         if (mmc_card_sd(card)) {
1755                 u32 blocks;
1756
1757                 blocks = mmc_sd_num_wr_blocks(card);
1758                 if (blocks != (u32)-1) {
1759                         ret = blk_end_request(req, 0, blocks << 9);
1760                 }
1761         } else {
1762                 if (!mmc_packed_cmd(mq_rq->cmd_type))
1763                         ret = blk_end_request(req, 0, brq->data.bytes_xfered);
1764         }
1765         return ret;
1766 }
1767
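/*
 * Complete the entries of a packed group that were transferred successfully.
 * If one entry failed, keep it (and the entries after it) in the packed list
 * for retry and return non-zero; otherwise the whole group is completed and
 * 0 is returned.
 */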
1768 static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
1769 {
1770         struct request *prq;
1771         struct mmc_packed *packed = mq_rq->packed;
1772         int idx = packed->idx_failure, i = 0;
1773         int ret = 0;
1774
1775         BUG_ON(!packed);
1776
1777         while (!list_empty(&packed->list)) {
1778                 prq = list_entry_rq(packed->list.next);
1779                 if (idx == i) {
1780                         /* retry from error index */
1781                         packed->nr_entries -= idx;
1782                         mq_rq->req = prq;
1783                         ret = 1;
1784
1785                         if (packed->nr_entries == MMC_PACKED_NR_SINGLE) {
1786                                 list_del_init(&prq->queuelist);
1787                                 mmc_blk_clear_packed(mq_rq);
1788                         }
1789                         return ret;
1790                 }
1791                 list_del_init(&prq->queuelist);
1792                 blk_end_request(prq, 0, blk_rq_bytes(prq));
1793                 i++;
1794         }
1795
1796         mmc_blk_clear_packed(mq_rq);
1797         return ret;
1798 }
1799
1800 static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
1801 {
1802         struct request *prq;
1803         struct mmc_packed *packed = mq_rq->packed;
1804
1805         BUG_ON(!packed);
1806
1807         while (!list_empty(&packed->list)) {
1808                 prq = list_entry_rq(packed->list.next);
1809                 list_del_init(&prq->queuelist);
1810                 blk_end_request(prq, -EIO, blk_rq_bytes(prq));
1811         }
1812
1813         mmc_blk_clear_packed(mq_rq);
1814 }
1815
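/*
 * Put the remaining entries of a packed group back on the block layer queue.
 * The first entry (the current request) is only unlinked from the packed
 * list, since the caller re-prepares and reissues it as a regular request.
 */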
1816 static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
1817                                       struct mmc_queue_req *mq_rq)
1818 {
1819         struct request *prq;
1820         struct request_queue *q = mq->queue;
1821         struct mmc_packed *packed = mq_rq->packed;
1822
1823         BUG_ON(!packed);
1824
1825         while (!list_empty(&packed->list)) {
1826                 prq = list_entry_rq(packed->list.prev);
1827                 if (prq->queuelist.prev != &packed->list) {
1828                         list_del_init(&prq->queuelist);
1829                         spin_lock_irq(q->queue_lock);
1830                         blk_requeue_request(mq->queue, prq);
1831                         spin_unlock_irq(q->queue_lock);
1832                 } else {
1833                         list_del_init(&prq->queuelist);
1834                 }
1835         }
1836
1837         mmc_blk_clear_packed(mq_rq);
1838 }
1839
1840 static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
1841 {
1842         struct mmc_blk_data *md = mq->data;
1843         struct mmc_card *card = md->queue.card;
1844         struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
1845         int ret = 1, disable_multi = 0, retry = 0, type, retune_retry_done = 0;
1846         enum mmc_blk_status status;
1847         struct mmc_queue_req *mq_rq;
1848         struct request *req = rqc;
1849         struct mmc_async_req *areq;
1850         const u8 packed_nr = 2;
1851         u8 reqs = 0;
1852
1853         if (!rqc && !mq->mqrq_prev->req)
1854                 return 0;
1855
1856         if (rqc)
1857                 reqs = mmc_blk_prep_packed_list(mq, rqc);
1858
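        /*
         * Asynchronous pipeline: prepare the current request (packed or
         * regular) and hand it to mmc_start_req(), which returns the
         * previously issued request once that one has completed, so it can
         * be post-processed below.
         */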
1859         do {
1860                 if (rqc) {
1861                         /*
1862                          * When the 4KB native sector size is enabled, only
1863                          * reads and writes of a multiple of 8 blocks are allowed
1864                          */
1865                         if ((brq->data.blocks & 0x07) &&
1866                             (card->ext_csd.data_sector_size == 4096)) {
1867                                 pr_err("%s: Transfer size is not 4KB sector size aligned\n",
1868                                         req->rq_disk->disk_name);
1869                                 mq_rq = mq->mqrq_cur;
1870                                 goto cmd_abort;
1871                         }
1872
1873                         if (reqs >= packed_nr)
1874                                 mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
1875                                                             card, mq);
1876                         else
1877                                 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
1878                         areq = &mq->mqrq_cur->mmc_active;
1879                 } else
1880                         areq = NULL;
1881                 areq = mmc_start_req(card->host, areq, (int *) &status);
1882                 if (!areq) {
1883                         if (status == MMC_BLK_NEW_REQUEST)
1884                                 mq->flags |= MMC_QUEUE_NEW_REQUEST;
1885                         return 0;
1886                 }
1887
1888                 mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
1889                 brq = &mq_rq->brq;
1890                 req = mq_rq->req;
1891                 type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
1892                 mmc_queue_bounce_post(mq_rq);
1893
1894                 switch (status) {
1895                 case MMC_BLK_SUCCESS:
1896                 case MMC_BLK_PARTIAL:
1897                         /*
1898                          * A block was successfully transferred.
1899                          */
1900                         mmc_blk_reset_success(md, type);
1901
1902                         if (mmc_packed_cmd(mq_rq->cmd_type)) {
1903                                 ret = mmc_blk_end_packed_req(mq_rq);
1904                                 break;
1905                         } else {
1906                                 ret = blk_end_request(req, 0,
1907                                                 brq->data.bytes_xfered);
1908                         }
1909
1910                         /*
1911                          * If the blk_end_request function returns non-zero even
1912                          * though all data has been transferred and no errors
1913                          * were returned by the host controller, it's a bug.
1914                          */
1915                         if (status == MMC_BLK_SUCCESS && ret) {
1916                                 pr_err("%s BUG rq_tot %d d_xfer %d\n",
1917                                        __func__, blk_rq_bytes(req),
1918                                        brq->data.bytes_xfered);
1919                                 rqc = NULL;
1920                                 goto cmd_abort;
1921                         }
1922                         break;
1923                 case MMC_BLK_CMD_ERR:
1924                         ret = mmc_blk_cmd_err(md, card, brq, req, ret);
1925                         if (mmc_blk_reset(md, card->host, type))
1926                                 goto cmd_abort;
1927                         if (!ret)
1928                                 goto start_new_req;
1929                         break;
1930                 case MMC_BLK_RETRY:
1931                         retune_retry_done = brq->retune_retry_done;
1932                         if (retry++ < 5)
1933                                 break;
1934                         /* Fall through */
1935                 case MMC_BLK_ABORT:
1936                         if (!mmc_blk_reset(md, card->host, type))
1937                                 break;
1938                         goto cmd_abort;
1939                 case MMC_BLK_DATA_ERR: {
1940                         int err;
1941
1942                         err = mmc_blk_reset(md, card->host, type);
1943                         if (!err)
1944                                 break;
1945                         if (err == -ENODEV ||
1946                                 mmc_packed_cmd(mq_rq->cmd_type))
1947                                 goto cmd_abort;
1948                         /* Fall through */
1949                 }
1950                 case MMC_BLK_ECC_ERR:
1951                         if (brq->data.blocks > 1) {
1952                                 /* Redo read one sector at a time */
1953                                 pr_warn("%s: retrying using single block read\n",
1954                                         req->rq_disk->disk_name);
1955                                 disable_multi = 1;
1956                                 break;
1957                         }
1958                         /*
1959                          * After an error, we redo I/O one sector at a
1960                          * time, so we only reach here after trying to
1961                          * read a single sector.
1962                          */
1963                         ret = blk_end_request(req, -EIO,
1964                                                 brq->data.blksz);
1965                         if (!ret)
1966                                 goto start_new_req;
1967                         break;
1968                 case MMC_BLK_NOMEDIUM:
1969                         goto cmd_abort;
1970                 default:
1971                         pr_err("%s: Unhandled return value (%d)",
1972                                         req->rq_disk->disk_name, status);
1973                         goto cmd_abort;
1974                 }
1975
1976                 if (ret) {
1977                         if (mmc_packed_cmd(mq_rq->cmd_type)) {
1978                                 if (!mq_rq->packed->retries)
1979                                         goto cmd_abort;
1980                                 mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
1981                                 mmc_start_req(card->host,
1982                                               &mq_rq->mmc_active, NULL);
1983                         } else {
1984
1985                                 /*
1986                                  * In case of an incomplete request,
1987                                  * prepare it again and resend it.
1988                                  */
1989                                 mmc_blk_rw_rq_prep(mq_rq, card,
1990                                                 disable_multi, mq);
1991                                 mmc_start_req(card->host,
1992                                                 &mq_rq->mmc_active, NULL);
1993                         }
1994                         mq_rq->brq.retune_retry_done = retune_retry_done;
1995                 }
1996         } while (ret);
1997
1998         return 1;
1999
2000  cmd_abort:
2001         if (mmc_packed_cmd(mq_rq->cmd_type)) {
2002                 mmc_blk_abort_packed_req(mq_rq);
2003         } else {
2004                 if (mmc_card_removed(card))
2005                         req->cmd_flags |= REQ_QUIET;
2006                 while (ret)
2007                         ret = blk_end_request(req, -EIO,
2008                                         blk_rq_cur_bytes(req));
2009         }
2010
2011  start_new_req:
2012         if (rqc) {
2013                 if (mmc_card_removed(card)) {
2014                         rqc->cmd_flags |= REQ_QUIET;
2015                         blk_end_request_all(rqc, -EIO);
2016                 } else {
2017                         /*
2018                          * If the current request is packed, it needs to be put back.
2019                          */
2020                         if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
2021                                 mmc_blk_revert_packed_req(mq, mq->mqrq_cur);
2022
2023                         mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
2024                         mmc_start_req(card->host,
2025                                       &mq->mqrq_cur->mmc_active, NULL);
2026                 }
2027         }
2028
2029         return 0;
2030 }
2031
2032 static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
2033 {
2034         int ret;
2035         struct mmc_blk_data *md = mq->data;
2036         struct mmc_card *card = md->queue.card;
2037         struct mmc_host *host = card->host;
2038         unsigned long flags;
2039         unsigned int cmd_flags = req ? req->cmd_flags : 0;
2040
2041         if (req && !mq->mqrq_prev->req)
2042                 /* claim host only for the first request */
2043                 mmc_get_card(card);
2044
2045         ret = mmc_blk_part_switch(card, md);
2046         if (ret) {
2047                 if (req) {
2048                         blk_end_request_all(req, -EIO);
2049                 }
2050                 ret = 0;
2051                 goto out;
2052         }
2053
2054         mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
2055         if (cmd_flags & REQ_DISCARD) {
2056                 /* complete ongoing async transfer before issuing discard */
2057                 if (card->host->areq)
2058                         mmc_blk_issue_rw_rq(mq, NULL);
2059                 if (req->cmd_flags & REQ_SECURE)
2060                         ret = mmc_blk_issue_secdiscard_rq(mq, req);
2061                 else
2062                         ret = mmc_blk_issue_discard_rq(mq, req);
2063         } else if (cmd_flags & REQ_FLUSH) {
2064                 /* complete ongoing async transfer before issuing flush */
2065                 if (card->host->areq)
2066                         mmc_blk_issue_rw_rq(mq, NULL);
2067                 ret = mmc_blk_issue_flush(mq, req);
2068         } else {
2069                 if (!req && host->areq) {
2070                         spin_lock_irqsave(&host->context_info.lock, flags);
2071                         host->context_info.is_waiting_last_req = true;
2072                         spin_unlock_irqrestore(&host->context_info.lock, flags);
2073                 }
2074                 ret = mmc_blk_issue_rw_rq(mq, req);
2075         }
2076
2077 out:
2078         if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
2079              (cmd_flags & MMC_REQ_SPECIAL_MASK))
2080                 /*
2081                  * Release the host when there are no more requests
2082                  * and after a special request (discard, flush) is done.
2083                  * In the special request case, there is no re-entry into
2084                  * 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
2085                  */
2086                 mmc_put_card(card);
2087         return ret;
2088 }
2089
2090 static inline int mmc_blk_readonly(struct mmc_card *card)
2091 {
2092         return mmc_card_readonly(card) ||
2093                !(card->csd.cmdclass & CCC_BLOCK_WRITE);
2094 }
2095
2096 static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
2097                                               struct device *parent,
2098                                               sector_t size,
2099                                               bool default_ro,
2100                                               const char *subname,
2101                                               int area_type)
2102 {
2103         struct mmc_blk_data *md;
2104         int devidx, ret;
2105
2106         devidx = find_first_zero_bit(dev_use, max_devices);
2107         if (devidx >= max_devices)
2108                 return ERR_PTR(-ENOSPC);
2109         __set_bit(devidx, dev_use);
2110
2111         md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
2112         if (!md) {
2113                 ret = -ENOMEM;
2114                 goto out;
2115         }
2116
2117         /*
2118          * !subname implies we are creating main mmc_blk_data that will be
2119          * associated with mmc_card with dev_set_drvdata. Due to device
2120          * partitions, devidx will no longer coincide with a per-physical-card
2121          * index, so we keep track of a separate name index.
2122          */
2123         if (!subname) {
2124                 md->name_idx = find_first_zero_bit(name_use, max_devices);
2125                 __set_bit(md->name_idx, name_use);
2126         } else
2127                 md->name_idx = ((struct mmc_blk_data *)
2128                                 dev_to_disk(parent)->private_data)->name_idx;
2129
2130         md->area_type = area_type;
2131
2132         /*
2133          * Set the read-only status based on the supported commands
2134          * and the write protect switch.
2135          */
2136         md->read_only = mmc_blk_readonly(card);
2137
2138         md->disk = alloc_disk(perdev_minors);
2139         if (md->disk == NULL) {
2140                 ret = -ENOMEM;
2141                 goto err_kfree;
2142         }
2143
2144         spin_lock_init(&md->lock);
2145         INIT_LIST_HEAD(&md->part);
2146         md->usage = 1;
2147
2148         ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
2149         if (ret)
2150                 goto err_putdisk;
2151
2152         md->queue.issue_fn = mmc_blk_issue_rq;
2153         md->queue.data = md;
2154
2155         md->disk->major = MMC_BLOCK_MAJOR;
2156         md->disk->first_minor = devidx * perdev_minors;
2157         md->disk->fops = &mmc_bdops;
2158         md->disk->private_data = md;
2159         md->disk->queue = md->queue.queue;
2160         md->disk->driverfs_dev = parent;
2161         set_disk_ro(md->disk, md->read_only || default_ro);
2162         if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
2163                 md->disk->flags |= GENHD_FL_NO_PART_SCAN;
2164
2165         /*
2166          * As discussed on lkml, GENHD_FL_REMOVABLE should:
2167          *
2168          * - be set for removable media with permanent block devices
2169          * - be unset for removable block devices with permanent media
2170          *
2171          * Since MMC block devices clearly fall under the second
2172          * case, we do not set GENHD_FL_REMOVABLE.  Userspace
2173          * should use the block device creation/destruction hotplug
2174          * messages to tell when the card is present.
2175          */
2176
2177         snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
2178                  "mmcblk%u%s", md->name_idx, subname ? subname : "");
2179
2180         if (mmc_card_mmc(card))
2181                 blk_queue_logical_block_size(md->queue.queue,
2182                                              card->ext_csd.data_sector_size);
2183         else
2184                 blk_queue_logical_block_size(md->queue.queue, 512);
2185
2186         set_capacity(md->disk, size);
2187
2188         if (mmc_host_cmd23(card->host)) {
2189                 if (mmc_card_mmc(card) ||
2190                     (mmc_card_sd(card) &&
2191                      card->scr.cmds & SD_SCR_CMD23_SUPPORT))
2192                         md->flags |= MMC_BLK_CMD23;
2193         }
2194
2195         if (mmc_card_mmc(card) &&
2196             md->flags & MMC_BLK_CMD23 &&
2197             ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
2198              card->ext_csd.rel_sectors)) {
2199                 md->flags |= MMC_BLK_REL_WR;
2200                 blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
2201         }
2202
2203         if (mmc_card_mmc(card) &&
2204             (area_type == MMC_BLK_DATA_AREA_MAIN) &&
2205             (md->flags & MMC_BLK_CMD23) &&
2206             card->ext_csd.packed_event_en) {
2207                 if (!mmc_packed_init(&md->queue, card))
2208                         md->flags |= MMC_BLK_PACKED_CMD;
2209         }
2210
2211         return md;
2212
2213  err_putdisk:
2214         put_disk(md->disk);
2215  err_kfree:
2216         kfree(md);
2217  out:
2218         return ERR_PTR(ret);
2219 }
2220
2221 static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
2222 {
2223         sector_t size;
2224
2225         if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
2226                 /*
2227                  * The EXT_CSD sector count is in number of 512 byte
2228                  * sectors.
2229                  */
2230                 size = card->ext_csd.sectors;
2231         } else {
2232                 /*
2233                  * The CSD capacity field is in units of read_blkbits.
2234                  * set_capacity takes units of 512 bytes.
2235                  */
2236                 size = (typeof(sector_t))card->csd.capacity
2237                         << (card->csd.read_blkbits - 9);
2238         }
2239
2240         return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
2241                                         MMC_BLK_DATA_AREA_MAIN);
2242 }
2243
2244 static int mmc_blk_alloc_part(struct mmc_card *card,
2245                               struct mmc_blk_data *md,
2246                               unsigned int part_type,
2247                               sector_t size,
2248                               bool default_ro,
2249                               const char *subname,
2250                               int area_type)
2251 {
2252         char cap_str[10];
2253         struct mmc_blk_data *part_md;
2254
2255         part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
2256                                     subname, area_type);
2257         if (IS_ERR(part_md))
2258                 return PTR_ERR(part_md);
2259         part_md->part_type = part_type;
2260         list_add(&part_md->part, &md->part);
2261
2262         string_get_size((u64)get_capacity(part_md->disk), 512, STRING_UNITS_2,
2263                         cap_str, sizeof(cap_str));
2264         pr_info("%s: %s %s partition %u %s\n",
2265                part_md->disk->disk_name, mmc_card_id(card),
2266                mmc_card_name(card), part_md->part_type, cap_str);
2267         return 0;
2268 }
2269
2270 /* MMC Physical partitions consist of two boot partitions and
2271  * up to four general purpose partitions.
2272  * For each partition enabled in EXT_CSD a block device will be allocated
2273  * to provide access to the partition.
2274  */
2275
2276 static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
2277 {
2278         int idx, ret = 0;
2279
2280         if (!mmc_card_mmc(card))
2281                 return 0;
2282
2283         for (idx = 0; idx < card->nr_parts; idx++) {
2284                 if (card->part[idx].size) {
2285                         ret = mmc_blk_alloc_part(card, md,
2286                                 card->part[idx].part_cfg,
2287                                 card->part[idx].size >> 9,
2288                                 card->part[idx].force_ro,
2289                                 card->part[idx].name,
2290                                 card->part[idx].area_type);
2291                         if (ret)
2292                                 return ret;
2293                 }
2294         }
2295
2296         return ret;
2297 }
2298
2299 static void mmc_blk_remove_req(struct mmc_blk_data *md)
2300 {
2301         struct mmc_card *card;
2302
2303         if (md) {
2304                 /*
2305                  * Flush remaining requests and free queues. It
2306                  * is freeing the queue that stops new requests
2307                  * from being accepted.
2308                  */
2309                 card = md->queue.card;
2310                 mmc_cleanup_queue(&md->queue);
2311                 if (md->flags & MMC_BLK_PACKED_CMD)
2312                         mmc_packed_clean(&md->queue);
2313                 if (md->disk->flags & GENHD_FL_UP) {
2314                         device_remove_file(disk_to_dev(md->disk), &md->force_ro);
2315                         if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
2316                                         card->ext_csd.boot_ro_lockable)
2317                                 device_remove_file(disk_to_dev(md->disk),
2318                                         &md->power_ro_lock);
2319
2320                         del_gendisk(md->disk);
2321                 }
2322                 mmc_blk_put(md);
2323         }
2324 }
2325
2326 static void mmc_blk_remove_parts(struct mmc_card *card,
2327                                  struct mmc_blk_data *md)
2328 {
2329         struct list_head *pos, *q;
2330         struct mmc_blk_data *part_md;
2331
2332         __clear_bit(md->name_idx, name_use);
2333         list_for_each_safe(pos, q, &md->part) {
2334                 part_md = list_entry(pos, struct mmc_blk_data, part);
2335                 list_del(pos);
2336                 mmc_blk_remove_req(part_md);
2337         }
2338 }
2339
2340 static int mmc_add_disk(struct mmc_blk_data *md)
2341 {
2342         int ret;
2343         struct mmc_card *card = md->queue.card;
2344
2345         add_disk(md->disk);
2346         md->force_ro.show = force_ro_show;
2347         md->force_ro.store = force_ro_store;
2348         sysfs_attr_init(&md->force_ro.attr);
2349         md->force_ro.attr.name = "force_ro";
2350         md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
2351         ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
2352         if (ret)
2353                 goto force_ro_fail;
2354
2355         if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
2356              card->ext_csd.boot_ro_lockable) {
2357                 umode_t mode;
2358
2359                 if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
2360                         mode = S_IRUGO;
2361                 else
2362                         mode = S_IRUGO | S_IWUSR;
2363
2364                 md->power_ro_lock.show = power_ro_lock_show;
2365                 md->power_ro_lock.store = power_ro_lock_store;
2366                 sysfs_attr_init(&md->power_ro_lock.attr);
2367                 md->power_ro_lock.attr.mode = mode;
2368                 md->power_ro_lock.attr.name =
2369                                         "ro_lock_until_next_power_on";
2370                 ret = device_create_file(disk_to_dev(md->disk),
2371                                 &md->power_ro_lock);
2372                 if (ret)
2373                         goto power_ro_lock_fail;
2374         }
2375         return ret;
2376
2377 power_ro_lock_fail:
2378         device_remove_file(disk_to_dev(md->disk), &md->force_ro);
2379 force_ro_fail:
2380         del_gendisk(md->disk);
2381
2382         return ret;
2383 }
2384
2385 #define CID_MANFID_SANDISK      0x2
2386 #define CID_MANFID_TOSHIBA      0x11
2387 #define CID_MANFID_MICRON       0x13
2388 #define CID_MANFID_SAMSUNG      0x15
2389
2390 static const struct mmc_fixup blk_fixups[] =
2391 {
2392         MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
2393                   MMC_QUIRK_INAND_CMD38),
2394         MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
2395                   MMC_QUIRK_INAND_CMD38),
2396         MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
2397                   MMC_QUIRK_INAND_CMD38),
2398         MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
2399                   MMC_QUIRK_INAND_CMD38),
2400         MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
2401                   MMC_QUIRK_INAND_CMD38),
2402
2403         /*
2404          * Some MMC cards experience performance degradation with CMD23
2405          * instead of CMD12-bounded multiblock transfers. For now we'll
2406          * blacklist what's bad...
2407          * - Certain Toshiba cards.
2408          *
2409          * N.B. This doesn't affect SD cards.
2410          */
2411         MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
2412                   MMC_QUIRK_BLK_NO_CMD23),
2413         MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
2414                   MMC_QUIRK_BLK_NO_CMD23),
2415         MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
2416                   MMC_QUIRK_BLK_NO_CMD23),
2417
2418         /*
2419          * Some Micron MMC cards need a longer data read timeout than
2420          * indicated in the CSD.
2421          */
2422         MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
2423                   MMC_QUIRK_LONG_READ_TIME),
2424
2425         /*
2426          * On these Samsung MoviNAND parts, performing secure erase or
2427          * secure trim can result in unrecoverable corruption due to a
2428          * firmware bug.
2429          */
2430         MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2431                   MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2432         MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2433                   MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2434         MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2435                   MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2436         MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2437                   MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2438         MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2439                   MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2440         MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2441                   MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2442         MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2443                   MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2444         MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2445                   MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2446
2447         END_FIXUP
2448 };
2449
2450 static int mmc_blk_probe(struct mmc_card *card)
2451 {
2452         struct mmc_blk_data *md, *part_md;
2453         char cap_str[10];
2454
2455         /*
2456          * Check that the card supports the command class(es) we need.
2457          */
2458         if (!(card->csd.cmdclass & CCC_BLOCK_READ))
2459                 return -ENODEV;
2460
2461         mmc_fixup_device(card, blk_fixups);
2462
2463         md = mmc_blk_alloc(card);
2464         if (IS_ERR(md))
2465                 return PTR_ERR(md);
2466
2467         string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
2468                         cap_str, sizeof(cap_str));
2469         pr_info("%s: %s %s %s %s\n",
2470                 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
2471                 cap_str, md->read_only ? "(ro)" : "");
2472
2473         if (mmc_blk_alloc_parts(card, md))
2474                 goto out;
2475
2476         dev_set_drvdata(&card->dev, md);
2477
2478         if (mmc_add_disk(md))
2479                 goto out;
2480
2481         list_for_each_entry(part_md, &md->part, part) {
2482                 if (mmc_add_disk(part_md))
2483                         goto out;
2484         }
2485
2486         pm_runtime_set_autosuspend_delay(&card->dev, 3000);
2487         pm_runtime_use_autosuspend(&card->dev);
2488
2489         /*
2490          * Don't enable runtime PM for SD-combo cards here. Leave that
2491          * decision to be taken during the SDIO init sequence instead.
2492          */
2493         if (card->type != MMC_TYPE_SD_COMBO) {
2494                 pm_runtime_set_active(&card->dev);
2495                 pm_runtime_enable(&card->dev);
2496         }
2497
2498         return 0;
2499
2500  out:
2501         mmc_blk_remove_parts(card, md);
2502         mmc_blk_remove_req(md);
2503         return 0;
2504 }
2505
2506 static void mmc_blk_remove(struct mmc_card *card)
2507 {
2508         struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
2509
2510         mmc_blk_remove_parts(card, md);
2511         pm_runtime_get_sync(&card->dev);
2512         mmc_claim_host(card->host);
2513         mmc_blk_part_switch(card, md);
2514         mmc_release_host(card->host);
2515         if (card->type != MMC_TYPE_SD_COMBO)
2516                 pm_runtime_disable(&card->dev);
2517         pm_runtime_put_noidle(&card->dev);
2518         mmc_blk_remove_req(md);
2519         dev_set_drvdata(&card->dev, NULL);
2520 }
2521
2522 static int _mmc_blk_suspend(struct mmc_card *card)
2523 {
2524         struct mmc_blk_data *part_md;
2525         struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
2526
2527         if (md) {
2528                 mmc_queue_suspend(&md->queue);
2529                 list_for_each_entry(part_md, &md->part, part) {
2530                         mmc_queue_suspend(&part_md->queue);
2531                 }
2532         }
2533         return 0;
2534 }
2535
2536 static void mmc_blk_shutdown(struct mmc_card *card)
2537 {
2538         _mmc_blk_suspend(card);
2539 }
2540
2541 #ifdef CONFIG_PM_SLEEP
2542 static int mmc_blk_suspend(struct device *dev)
2543 {
2544         struct mmc_card *card = mmc_dev_to_card(dev);
2545
2546         return _mmc_blk_suspend(card);
2547 }
2548
2549 static int mmc_blk_resume(struct device *dev)
2550 {
2551         struct mmc_blk_data *part_md;
2552         struct mmc_blk_data *md = dev_get_drvdata(dev);
2553
2554         if (md) {
2555                 /*
2556                  * Resume involves the card going into idle state,
2557                  * so the current partition is always the main one.
2558                  */
2559                 md->part_curr = md->part_type;
2560                 mmc_queue_resume(&md->queue);
2561                 list_for_each_entry(part_md, &md->part, part) {
2562                         mmc_queue_resume(&part_md->queue);
2563                 }
2564         }
2565         return 0;
2566 }
2567 #endif
2568
2569 static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume);
2570
2571 static struct mmc_driver mmc_driver = {
2572         .drv            = {
2573                 .name   = "mmcblk",
2574                 .pm     = &mmc_blk_pm_ops,
2575         },
2576         .probe          = mmc_blk_probe,
2577         .remove         = mmc_blk_remove,
2578         .shutdown       = mmc_blk_shutdown,
2579 };
2580
2581 static int __init mmc_blk_init(void)
2582 {
2583         int res;
2584
2585         if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
2586                 pr_info("mmcblk: using %d minors per device\n", perdev_minors);
2587
2588         max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors);
2589
2590         res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
2591         if (res)
2592                 goto out;
2593
2594         res = mmc_register_driver(&mmc_driver);
2595         if (res)
2596                 goto out2;
2597
2598         return 0;
2599  out2:
2600         unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
2601  out:
2602         return res;
2603 }
2604
2605 static void __exit mmc_blk_exit(void)
2606 {
2607         mmc_unregister_driver(&mmc_driver);
2608         unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
2609 }
2610
2611 module_init(mmc_blk_init);
2612 module_exit(mmc_blk_exit);
2613
2614 MODULE_LICENSE("GPL");
2615 MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
2616