/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 *          28 May 2002
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/pm_runtime.h>
#include <linux/idr.h>
#include <linux/debugfs.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <linux/uaccess.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"
#include "host.h"
#include "bus.h"
#include "mmc_ops.h"
#include "quirks.h"
#include "sd_ops.h"

MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."

#define MMC_BLK_TIMEOUT_MS  (10 * 60 * 1000)        /* 10 minute timeout */
#define MMC_SANITIZE_REQ_TIMEOUT 240000
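/* Extract the EXT_CSD register index (bits 23:16) from a SWITCH (CMD6) argument. */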
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)

#define mmc_req_rel_wr(req)     ((req->cmd_flags & REQ_FUA) && \
                                  (rq_data_dir(req) == WRITE))
static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or boot argument options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so number of mmcblk devices is
 * limited to (1 << 20) / number of minors per device.  It is also
 * limited by the MAX_DEVICES below.
 */
static int max_devices;

#define MAX_DEVICES 256

static DEFINE_IDA(mmc_blk_ida);
static DEFINE_IDA(mmc_rpmb_ida);

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
        spinlock_t      lock;
        struct device   *parent;
        struct gendisk  *disk;
        struct mmc_queue queue;
        struct list_head part;
        struct list_head rpmbs;

        unsigned int    flags;
#define MMC_BLK_CMD23   (1 << 0)        /* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR  (1 << 1)        /* MMC Reliable write support */

        unsigned int    usage;
        unsigned int    read_only;
        unsigned int    part_type;
        unsigned int    reset_done;
#define MMC_BLK_READ            BIT(0)
#define MMC_BLK_WRITE           BIT(1)
#define MMC_BLK_DISCARD         BIT(2)
#define MMC_BLK_SECDISCARD      BIT(3)

        /*
         * Only set in the main mmc_blk_data associated
         * with the mmc_card via dev_set_drvdata; keeps
         * track of the currently selected device partition.
         */
        unsigned int    part_curr;
        struct device_attribute force_ro;
        struct device_attribute power_ro_lock;
        int     area_type;

        /* debugfs files (only in main mmc_blk_data) */
        struct dentry *status_dentry;
        struct dentry *ext_csd_dentry;
};

/* Device type for RPMB character devices */
static dev_t mmc_rpmb_devt;

/* Bus type for RPMB character devices */
static struct bus_type mmc_rpmb_bus_type = {
        .name = "mmc_rpmb",
};

/**
 * struct mmc_rpmb_data - special RPMB device type for these areas
 * @dev: the device for the RPMB area
 * @chrdev: character device for the RPMB area
 * @id: unique device ID number
 * @part_index: partition index (0 on first)
 * @md: parent MMC block device
 * @node: list item, so we can put this device on a list
 */
struct mmc_rpmb_data {
        struct device dev;
        struct cdev chrdev;
        int id;
        unsigned int part_index;
        struct mmc_blk_data *md;
        struct list_head node;
};

static DEFINE_MUTEX(open_lock);

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Number of minors to allocate per device");

static inline int mmc_blk_part_switch(struct mmc_card *card,
                                      unsigned int part_type);

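/*
 * Look up the mmc_blk_data for a disk and take a reference to it, or
 * return NULL if the device is already being torn down (usage == 0).
 */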
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
        struct mmc_blk_data *md;

        mutex_lock(&open_lock);
        md = disk->private_data;
        if (md && md->usage == 0)
                md = NULL;
        if (md)
                md->usage++;
        mutex_unlock(&open_lock);

        return md;
}

static inline int mmc_get_devidx(struct gendisk *disk)
{
        int devidx = disk->first_minor / perdev_minors;
        return devidx;
}

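/*
 * Drop a reference taken with mmc_blk_get(); on the last put, release
 * the request queue, the device index and the mmc_blk_data itself.
 */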
static void mmc_blk_put(struct mmc_blk_data *md)
{
        mutex_lock(&open_lock);
        md->usage--;
        if (md->usage == 0) {
                int devidx = mmc_get_devidx(md->disk);
                blk_cleanup_queue(md->queue.queue);
                ida_simple_remove(&mmc_blk_ida, devidx);
                put_disk(md->disk);
                kfree(md);
        }
        mutex_unlock(&open_lock);
}

static ssize_t power_ro_lock_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        int ret;
        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
        struct mmc_card *card = md->queue.card;
        int locked = 0;

        if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
                locked = 2;
        else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
                locked = 1;

        ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);

        mmc_blk_put(md);

        return ret;
}

static ssize_t power_ro_lock_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        int ret;
        struct mmc_blk_data *md, *part_md;
        struct mmc_queue *mq;
        struct request *req;
        unsigned long set;

        if (kstrtoul(buf, 0, &set))
                return -EINVAL;

        if (set != 1)
                return count;

        md = mmc_blk_get(dev_to_disk(dev));
        mq = &md->queue;

        /* Dispatch locking to the block layer */
        req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, __GFP_RECLAIM);
        if (IS_ERR(req)) {
                count = PTR_ERR(req);
                goto out_put;
        }
        req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
        blk_execute_rq(mq->queue, NULL, req, 0);
        ret = req_to_mmc_queue_req(req)->drv_op_result;
        blk_put_request(req);

        if (!ret) {
                pr_info("%s: Locking boot partition ro until next power on\n",
                        md->disk->disk_name);
                set_disk_ro(md->disk, 1);

                list_for_each_entry(part_md, &md->part, part)
                        if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
                                pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
                                set_disk_ro(part_md->disk, 1);
                        }
        }
out_put:
        mmc_blk_put(md);
        return count;
}

static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        int ret;
        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

        ret = snprintf(buf, PAGE_SIZE, "%d\n",
                       get_disk_ro(dev_to_disk(dev)) ^
                       md->read_only);
        mmc_blk_put(md);
        return ret;
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
                              const char *buf, size_t count)
{
        int ret;
        char *end;
        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
        unsigned long set = simple_strtoul(buf, &end, 0);
        if (end == buf) {
                ret = -EINVAL;
                goto out;
        }

        set_disk_ro(dev_to_disk(dev), set || md->read_only);
        ret = count;
out:
        mmc_blk_put(md);
        return ret;
}

static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
        struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
        int ret = -ENXIO;

        mutex_lock(&block_mutex);
        if (md) {
                if (md->usage == 2)
                        check_disk_change(bdev);
                ret = 0;

                if ((mode & FMODE_WRITE) && md->read_only) {
                        mmc_blk_put(md);
                        ret = -EROFS;
                }
        }
        mutex_unlock(&block_mutex);

        return ret;
}

static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
        struct mmc_blk_data *md = disk->private_data;

        mutex_lock(&block_mutex);
        mmc_blk_put(md);
        mutex_unlock(&block_mutex);
}

static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
        geo->heads = 4;
        geo->sectors = 16;
        return 0;
}

struct mmc_blk_ioc_data {
        struct mmc_ioc_cmd ic;
        unsigned char *buf;
        u64 buf_bytes;
        struct mmc_rpmb_data *rpmb;
};

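/*
 * Copy an mmc_ioc_cmd and, if present, its data buffer in from user
 * space. Returns the allocated mmc_blk_ioc_data or an ERR_PTR().
 */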
static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
        struct mmc_ioc_cmd __user *user)
{
        struct mmc_blk_ioc_data *idata;
        int err;

        idata = kmalloc(sizeof(*idata), GFP_KERNEL);
        if (!idata) {
                err = -ENOMEM;
                goto out;
        }

        if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
                err = -EFAULT;
                goto idata_err;
        }

        idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
        if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
                err = -EOVERFLOW;
                goto idata_err;
        }

        if (!idata->buf_bytes) {
                idata->buf = NULL;
                return idata;
        }

        idata->buf = kmalloc(idata->buf_bytes, GFP_KERNEL);
        if (!idata->buf) {
                err = -ENOMEM;
                goto idata_err;
        }

        if (copy_from_user(idata->buf, (void __user *)(unsigned long)
                                        idata->ic.data_ptr, idata->buf_bytes)) {
                err = -EFAULT;
                goto copy_err;
        }

        return idata;

copy_err:
        kfree(idata->buf);
idata_err:
        kfree(idata);
out:
        return ERR_PTR(err);
}

static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
                                      struct mmc_blk_ioc_data *idata)
{
        struct mmc_ioc_cmd *ic = &idata->ic;

        if (copy_to_user(&(ic_ptr->response), ic->response,
                         sizeof(ic->response)))
                return -EFAULT;

        if (!idata->ic.write_flag) {
                if (copy_to_user((void __user *)(unsigned long)ic->data_ptr,
                                 idata->buf, idata->buf_bytes))
                        return -EFAULT;
        }

        return 0;
}

static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
                                       u32 retries_max)
{
        int err;
        u32 retry_count = 0;

        if (!status || !retries_max)
                return -EINVAL;

        do {
                err = __mmc_send_status(card, status, 5);
                if (err)
                        break;

                if (!R1_STATUS(*status) &&
                                (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
                        break; /* RPMB programming operation complete */

                /*
                 * Reschedule to give the MMC device a chance to continue
                 * processing the previous command without being polled too
                 * frequently.
                 */
                usleep_range(1000, 5000);
        } while (++retry_count < retries_max);

        if (retry_count == retries_max)
                err = -EPERM;

        return err;
}

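/* Start a SANITIZE operation via EXT_CSD_SANITIZE_START, if the card supports it. */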
static int ioctl_do_sanitize(struct mmc_card *card)
{
        int err;

        if (!mmc_can_sanitize(card)) {
                pr_warn("%s: %s - SANITIZE is not supported\n",
                        mmc_hostname(card->host), __func__);
                err = -EOPNOTSUPP;
                goto out;
        }

        pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
                mmc_hostname(card->host), __func__);

        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                        EXT_CSD_SANITIZE_START, 1,
                                        MMC_SANITIZE_REQ_TIMEOUT);

        if (err)
                pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
                       mmc_hostname(card->host), __func__, err);

        pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
                                             __func__);
out:
        return err;
}

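/*
 * Execute a single user-supplied command against the card. Called from
 * the driver-op dispatcher in the request queue context.
 */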
static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
                               struct mmc_blk_ioc_data *idata)
{
        struct mmc_command cmd = {};
        struct mmc_data data = {};
        struct mmc_request mrq = {};
        struct scatterlist sg;
        int err;
        unsigned int target_part;
        u32 status = 0;

        if (!card || !md || !idata)
                return -EINVAL;

        /*
         * RPMB accesses come in from the character device, so we
         * need to target these explicitly. Else we just target the
         * partition type for the block device the ioctl() was issued
         * on.
         */
        if (idata->rpmb) {
                /* Support multiple RPMB partitions */
                target_part = idata->rpmb->part_index;
                target_part |= EXT_CSD_PART_CONFIG_ACC_RPMB;
        } else {
                target_part = md->part_type;
        }

        cmd.opcode = idata->ic.opcode;
        cmd.arg = idata->ic.arg;
        cmd.flags = idata->ic.flags;

        if (idata->buf_bytes) {
                data.sg = &sg;
                data.sg_len = 1;
                data.blksz = idata->ic.blksz;
                data.blocks = idata->ic.blocks;

                sg_init_one(data.sg, idata->buf, idata->buf_bytes);

                if (idata->ic.write_flag)
                        data.flags = MMC_DATA_WRITE;
                else
                        data.flags = MMC_DATA_READ;

                /* data.flags must already be set before doing this. */
                mmc_set_data_timeout(&data, card);

                /* Allow overriding the timeout_ns for empirical tuning. */
                if (idata->ic.data_timeout_ns)
                        data.timeout_ns = idata->ic.data_timeout_ns;

                if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
                        /*
                         * Pretend this is a data transfer and rely on the
                         * host driver to compute timeout.  When all host
                         * drivers support cmd.cmd_timeout for R1B, this
                         * can be changed to:
                         *
                         *     mrq.data = NULL;
                         *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
                         */
                        data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
                }

                mrq.data = &data;
        }

        mrq.cmd = &cmd;

        err = mmc_blk_part_switch(card, target_part);
        if (err)
                return err;

        if (idata->ic.is_acmd) {
                err = mmc_app_cmd(card->host, card);
                if (err)
                        return err;
        }

        if (idata->rpmb) {
                err = mmc_set_blockcount(card, data.blocks,
                        idata->ic.write_flag & (1 << 31));
                if (err)
                        return err;
        }

        if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
            (cmd.opcode == MMC_SWITCH)) {
                err = ioctl_do_sanitize(card);

                if (err)
                        pr_err("%s: ioctl_do_sanitize() failed. err = %d",
                               __func__, err);

                return err;
        }

        mmc_wait_for_req(card->host, &mrq);

        if (cmd.error) {
                dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
                                                __func__, cmd.error);
                return cmd.error;
        }
        if (data.error) {
                dev_err(mmc_dev(card->host), "%s: data error %d\n",
                                                __func__, data.error);
                return data.error;
        }

        /*
         * According to the SD specs, some commands require a delay after
         * issuing the command.
         */
        if (idata->ic.postsleep_min_us)
                usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);

        memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));

        if (idata->rpmb) {
                /*
                 * Ensure RPMB command has completed by polling CMD13
                 * "Send Status".
                 */
                err = ioctl_rpmb_card_status_poll(card, &status, 5);
                if (err)
                        dev_err(mmc_dev(card->host),
                                        "%s: Card Status=0x%08X, error %d\n",
                                        __func__, status, err);
        }

        return err;
}

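/*
 * Handle MMC_IOC_CMD: copy the command in from user space, dispatch it
 * through the block request queue, and copy the result back out.
 */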
static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
                             struct mmc_ioc_cmd __user *ic_ptr,
                             struct mmc_rpmb_data *rpmb)
{
        struct mmc_blk_ioc_data *idata;
        struct mmc_blk_ioc_data *idatas[1];
        struct mmc_queue *mq;
        struct mmc_card *card;
        int err = 0, ioc_err = 0;
        struct request *req;

        idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
        if (IS_ERR(idata))
                return PTR_ERR(idata);
        /* This will be NULL on non-RPMB ioctls */
        idata->rpmb = rpmb;

        card = md->queue.card;
        if (IS_ERR(card)) {
                err = PTR_ERR(card);
                goto cmd_done;
        }

        /*
         * Dispatch the ioctl() into the block request queue.
         */
        mq = &md->queue;
        req = blk_get_request(mq->queue,
                idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
                __GFP_RECLAIM);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto cmd_done;
        }
        idatas[0] = idata;
        req_to_mmc_queue_req(req)->drv_op =
                rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
        req_to_mmc_queue_req(req)->drv_op_data = idatas;
        req_to_mmc_queue_req(req)->ioc_count = 1;
        blk_execute_rq(mq->queue, NULL, req, 0);
        ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
        err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
        blk_put_request(req);

cmd_done:
        kfree(idata->buf);
        kfree(idata);
        return ioc_err ? ioc_err : err;
}

static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
                                   struct mmc_ioc_multi_cmd __user *user,
                                   struct mmc_rpmb_data *rpmb)
{
        struct mmc_blk_ioc_data **idata = NULL;
        struct mmc_ioc_cmd __user *cmds = user->cmds;
        struct mmc_card *card;
        struct mmc_queue *mq;
        int i, err = 0, ioc_err = 0;
        __u64 num_of_cmds;
        struct request *req;

        if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
                           sizeof(num_of_cmds)))
                return -EFAULT;

        if (!num_of_cmds)
                return 0;

        if (num_of_cmds > MMC_IOC_MAX_CMDS)
                return -EINVAL;

        idata = kcalloc(num_of_cmds, sizeof(*idata), GFP_KERNEL);
        if (!idata)
                return -ENOMEM;

        for (i = 0; i < num_of_cmds; i++) {
                idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]);
                if (IS_ERR(idata[i])) {
                        err = PTR_ERR(idata[i]);
                        num_of_cmds = i;
                        goto cmd_err;
                }
                /* This will be NULL on non-RPMB ioctls */
                idata[i]->rpmb = rpmb;
        }

        card = md->queue.card;
        if (IS_ERR(card)) {
                err = PTR_ERR(card);
                goto cmd_err;
        }

        /*
         * Dispatch the ioctl()s into the block request queue.
         */
        mq = &md->queue;
        req = blk_get_request(mq->queue,
                idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
                __GFP_RECLAIM);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto cmd_err;
        }
        req_to_mmc_queue_req(req)->drv_op =
                rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
        req_to_mmc_queue_req(req)->drv_op_data = idata;
        req_to_mmc_queue_req(req)->ioc_count = num_of_cmds;
        blk_execute_rq(mq->queue, NULL, req, 0);
        ioc_err = req_to_mmc_queue_req(req)->drv_op_result;

        /* copy to user if data and response */
        for (i = 0; i < num_of_cmds && !err; i++)
                err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);

        blk_put_request(req);

cmd_err:
        for (i = 0; i < num_of_cmds; i++) {
                kfree(idata[i]->buf);
                kfree(idata[i]);
        }
        kfree(idata);
        return ioc_err ? ioc_err : err;
}

static int mmc_blk_check_blkdev(struct block_device *bdev)
{
        /*
         * The caller must have CAP_SYS_RAWIO, and must be calling this on the
         * whole block device, not on a partition.  This prevents overspray
         * between sibling partitions.
         */
        if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
                return -EPERM;
        return 0;
}

static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
        unsigned int cmd, unsigned long arg)
{
        struct mmc_blk_data *md;
        int ret;

        switch (cmd) {
        case MMC_IOC_CMD:
                ret = mmc_blk_check_blkdev(bdev);
                if (ret)
                        return ret;
                md = mmc_blk_get(bdev->bd_disk);
                if (!md)
                        return -EINVAL;
                ret = mmc_blk_ioctl_cmd(md,
                                        (struct mmc_ioc_cmd __user *)arg,
                                        NULL);
                mmc_blk_put(md);
                return ret;
        case MMC_IOC_MULTI_CMD:
                ret = mmc_blk_check_blkdev(bdev);
                if (ret)
                        return ret;
                md = mmc_blk_get(bdev->bd_disk);
                if (!md)
                        return -EINVAL;
                ret = mmc_blk_ioctl_multi_cmd(md,
                                        (struct mmc_ioc_multi_cmd __user *)arg,
                                        NULL);
                mmc_blk_put(md);
                return ret;
        default:
                return -EINVAL;
        }
}

#ifdef CONFIG_COMPAT
static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
        unsigned int cmd, unsigned long arg)
{
        return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
#endif

static const struct block_device_operations mmc_bdops = {
        .open                   = mmc_blk_open,
        .release                = mmc_blk_release,
        .getgeo                 = mmc_blk_getgeo,
        .owner                  = THIS_MODULE,
        .ioctl                  = mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl           = mmc_blk_compat_ioctl,
#endif
};

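/*
 * RPMB access requires command queuing to be disabled and re-tuning to
 * be paused for the duration of the partition switch.
 */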
static int mmc_blk_part_switch_pre(struct mmc_card *card,
                                   unsigned int part_type)
{
        int ret = 0;

        if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
                if (card->ext_csd.cmdq_en) {
                        ret = mmc_cmdq_disable(card);
                        if (ret)
                                return ret;
                }
                mmc_retune_pause(card->host);
        }

        return ret;
}

static int mmc_blk_part_switch_post(struct mmc_card *card,
                                    unsigned int part_type)
{
        int ret = 0;

        if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
                mmc_retune_unpause(card->host);
                if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
                        ret = mmc_cmdq_enable(card);
        }

        return ret;
}

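/*
 * Switch the card to the given partition, if it is not already the
 * current one, by updating the ACC bits of EXT_CSD PART_CONFIG.
 */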
static inline int mmc_blk_part_switch(struct mmc_card *card,
                                      unsigned int part_type)
{
        int ret = 0;
        struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);

        if (main_md->part_curr == part_type)
                return 0;

        if (mmc_card_mmc(card)) {
                u8 part_config = card->ext_csd.part_config;

                ret = mmc_blk_part_switch_pre(card, part_type);
                if (ret)
                        return ret;

                part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
                part_config |= part_type;

                ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                 EXT_CSD_PART_CONFIG, part_config,
                                 card->ext_csd.part_time);
                if (ret) {
                        mmc_blk_part_switch_post(card, part_type);
                        return ret;
                }

                card->ext_csd.part_config = part_config;

                ret = mmc_blk_part_switch_post(card, main_md->part_curr);
        }

        main_md->part_curr = part_type;
        return ret;
}

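/*
 * Query an SD card for the number of successfully written blocks
 * (ACMD22), so that a partially completed write can be accounted for.
 */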
static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
{
        int err;
        u32 result;
        __be32 *blocks;

        struct mmc_request mrq = {};
        struct mmc_command cmd = {};
        struct mmc_data data = {};

        struct scatterlist sg;

        cmd.opcode = MMC_APP_CMD;
        cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

        err = mmc_wait_for_cmd(card->host, &cmd, 0);
        if (err)
                return err;
        if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
                return -EIO;

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
        cmd.arg = 0;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

        data.blksz = 4;
        data.blocks = 1;
        data.flags = MMC_DATA_READ;
        data.sg = &sg;
        data.sg_len = 1;
        mmc_set_data_timeout(&data, card);

        mrq.cmd = &cmd;
        mrq.data = &data;

        blocks = kmalloc(4, GFP_KERNEL);
        if (!blocks)
                return -ENOMEM;

        sg_init_one(&sg, blocks, 4);

        mmc_wait_for_req(card->host, &mrq);

        result = ntohl(*blocks);
        kfree(blocks);

        if (cmd.error || data.error)
                return -EIO;

        *written_blocks = result;

        return 0;
}

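/*
 * Poll the card with CMD13 until it is ready for data and has left the
 * programming state, or until timeout_ms expires.
 */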
static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
                bool hw_busy_detect, struct request *req, bool *gen_err)
{
        unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
        int err = 0;
        u32 status;

        do {
                err = __mmc_send_status(card, &status, 5);
                if (err) {
                        pr_err("%s: error %d requesting status\n",
                               req->rq_disk->disk_name, err);
                        return err;
                }

                if (status & R1_ERROR) {
                        pr_err("%s: %s: error sending status cmd, status %#x\n",
                                req->rq_disk->disk_name, __func__, status);
                        *gen_err = true;
                }

                /* We may rely on the host hw to handle busy detection. */
                if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) &&
                        hw_busy_detect)
                        break;

                /*
                 * Timeout if the device never becomes ready for data and never
                 * leaves the program state.
                 */
                if (time_after(jiffies, timeout)) {
                        pr_err("%s: Card stuck in programming state! %s %s\n",
                                mmc_hostname(card->host),
                                req->rq_disk->disk_name, __func__);
                        return -ETIMEDOUT;
                }

                /*
                 * Some cards mishandle the status bits,
                 * so make sure to check both the busy
                 * indication and the card state.
                 */
        } while (!(status & R1_READY_FOR_DATA) ||
                 (R1_CURRENT_STATE(status) == R1_STATE_PRG));

        return err;
}

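/*
 * Send CMD12 to stop the current transmission, using an R1B response
 * (and hardware busy detection) where the host timeout allows it.
 */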
static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
                struct request *req, bool *gen_err, u32 *stop_status)
{
        struct mmc_host *host = card->host;
        struct mmc_command cmd = {};
        int err;
        bool use_r1b_resp = rq_data_dir(req) == WRITE;

        /*
         * Normally we use R1B responses for WRITE, but in cases where the host
         * has specified a max_busy_timeout we need to validate it. A failure
         * means we need to prevent the host from doing hw busy detection, which
         * is done by converting to a R1 response instead.
         */
        if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
                use_r1b_resp = false;

        cmd.opcode = MMC_STOP_TRANSMISSION;
        if (use_r1b_resp) {
                cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
                cmd.busy_timeout = timeout_ms;
        } else {
                cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
        }

        err = mmc_wait_for_cmd(host, &cmd, 5);
        if (err)
                return err;

        *stop_status = cmd.resp[0];

        /* No need to check card status in case of READ. */
        if (rq_data_dir(req) == READ)
                return 0;

        if (!mmc_host_is_spi(host) &&
                (*stop_status & R1_ERROR)) {
                pr_err("%s: %s: general error sending stop command, resp %#x\n",
                        req->rq_disk->disk_name, __func__, *stop_status);
                *gen_err = true;
        }

        return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
}

#define ERR_NOMEDIUM    3
#define ERR_RETRY       2
#define ERR_ABORT       1
#define ERR_CONTINUE    0

static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
        bool status_valid, u32 status)
{
        switch (error) {
        case -EILSEQ:
                /* response crc error, retry the r/w cmd */
                pr_err("%s: %s sending %s command, card status %#x\n",
                        req->rq_disk->disk_name, "response CRC error",
                        name, status);
                return ERR_RETRY;

        case -ETIMEDOUT:
                pr_err("%s: %s sending %s command, card status %#x\n",
                        req->rq_disk->disk_name, "timed out", name, status);

                /* If the status cmd initially failed, retry the r/w cmd */
                if (!status_valid) {
                        pr_err("%s: status not valid, retrying timeout\n",
                                req->rq_disk->disk_name);
                        return ERR_RETRY;
                }

                /*
                 * If it was an r/w cmd CRC error, or an illegal command
                 * (e.g., issued in wrong state) then retry - we should
                 * have corrected the state problem above.
                 */
                if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
                        pr_err("%s: command error, retrying timeout\n",
                                req->rq_disk->disk_name);
                        return ERR_RETRY;
                }

                /* Otherwise abort the command */
                return ERR_ABORT;

        default:
                /* We don't understand the error code the driver gave us */
                pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
                       req->rq_disk->disk_name, error, status);
                return ERR_ABORT;
        }
}

/*
 * Initial r/w and stop cmd error recovery.
 * We don't know whether the card received the r/w cmd or not, so try to
 * restore things back to a sane state.  Essentially, we do this as follows:
 * - Obtain card status.  If the first attempt to obtain card status fails,
 *   the status word will reflect the failed status cmd, not the failed
 *   r/w cmd.  If we fail to obtain card status, it suggests we can no
 *   longer communicate with the card.
 * - Check the card state.  If the card received the cmd but there was a
 *   transient problem with the response, it might still be in a data transfer
 *   mode.  Try to send it a stop command.  If this fails, we can't recover.
 * - If the r/w cmd failed due to a response CRC error, it was probably
 *   transient, so retry the cmd.
 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
 *   illegal cmd, retry.
 * Otherwise we don't understand what happened, so abort.
 */
static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
        struct mmc_blk_request *brq, bool *ecc_err, bool *gen_err)
{
        bool prev_cmd_status_valid = true;
        u32 status, stop_status = 0;
        int err, retry;

        if (mmc_card_removed(card))
                return ERR_NOMEDIUM;

        /*
         * Try to get card status which indicates both the card state
         * and why there was no response.  If the first attempt fails,
         * we can't be sure the returned status is for the r/w command.
         */
        for (retry = 2; retry >= 0; retry--) {
                err = __mmc_send_status(card, &status, 0);
                if (!err)
                        break;

                /* Re-tune if needed */
                mmc_retune_recheck(card->host);

                prev_cmd_status_valid = false;
                pr_err("%s: error %d sending status command, %sing\n",
                       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
        }

        /* We couldn't get a response from the card.  Give up. */
        if (err) {
                /* Check if the card is removed */
                if (mmc_detect_card_removed(card->host))
                        return ERR_NOMEDIUM;
                return ERR_ABORT;
        }

        /* Flag ECC errors */
        if ((status & R1_CARD_ECC_FAILED) ||
            (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
            (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
                *ecc_err = true;

        /* Flag General errors */
        if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
                if ((status & R1_ERROR) ||
                        (brq->stop.resp[0] & R1_ERROR)) {
                        pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
                               req->rq_disk->disk_name, __func__,
                               brq->stop.resp[0], status);
                        *gen_err = true;
                }

        /*
         * Check the current card state.  If it is in some data transfer
         * mode, tell it to stop (and hopefully transition back to TRAN.)
         */
        if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
            R1_CURRENT_STATE(status) == R1_STATE_RCV) {
                err = send_stop(card,
                        DIV_ROUND_UP(brq->data.timeout_ns, 1000000),
                        req, gen_err, &stop_status);
                if (err) {
                        pr_err("%s: error %d sending stop command\n",
                               req->rq_disk->disk_name, err);
                        /*
                         * If the stop cmd also timed out, the card is probably
                         * not present, so abort. Other errors are bad news too.
                         */
                        return ERR_ABORT;
                }

                if (stop_status & R1_CARD_ECC_FAILED)
                        *ecc_err = true;
        }

        /* Check for set block count errors */
        if (brq->sbc.error)
                return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
                                prev_cmd_status_valid, status);

        /* Check for r/w command errors */
        if (brq->cmd.error)
                return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
                                prev_cmd_status_valid, status);

        /* Data errors */
        if (!brq->stop.error)
                return ERR_CONTINUE;

        /* Now for stop errors.  These aren't fatal to the transfer. */
        pr_info("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
               req->rq_disk->disk_name, brq->stop.error,
               brq->cmd.resp[0], status);

        /*
         * Substitute in our own stop status as this will give the error
         * state which happened during the execution of the r/w command.
         */
        if (stop_status) {
                brq->stop.resp[0] = stop_status;
                brq->stop.error = 0;
        }
        return ERR_CONTINUE;
}

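/*
 * Reset the card after an error, at most once per request type, and
 * switch back to the partition the request was targeting.
 */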
static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
                         int type)
{
        int err;

        if (md->reset_done & type)
                return -EEXIST;

        md->reset_done |= type;
        err = mmc_hw_reset(host);
        /* Ensure we switch back to the correct partition */
        if (err != -EOPNOTSUPP) {
                struct mmc_blk_data *main_md =
                        dev_get_drvdata(&host->card->dev);
                int part_err;

                main_md->part_curr = main_md->part_type;
                part_err = mmc_blk_part_switch(host->card, md->part_type);
                if (part_err) {
                        /*
                         * We have failed to get back into the correct
                         * partition, so we need to abort the whole request.
                         */
                        return -ENODEV;
                }
        }
        return err;
}

static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
{
        md->reset_done &= ~type;
}

/*
 * Non-block commands are queued by the block layer along with all other
 * requests, and come back to be issued to the card in this function.
 */
static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
{
        struct mmc_queue_req *mq_rq;
        struct mmc_card *card = mq->card;
        struct mmc_blk_data *md = mq->blkdata;
        struct mmc_blk_ioc_data **idata;
        bool rpmb_ioctl;
        u8 **ext_csd;
        u32 status;
        int ret;
        int i;

        mq_rq = req_to_mmc_queue_req(req);
        rpmb_ioctl = (mq_rq->drv_op == MMC_DRV_OP_IOCTL_RPMB);

        switch (mq_rq->drv_op) {
        case MMC_DRV_OP_IOCTL:
        case MMC_DRV_OP_IOCTL_RPMB:
                idata = mq_rq->drv_op_data;
                for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
                        ret = __mmc_blk_ioctl_cmd(card, md, idata[i]);
                        if (ret)
                                break;
                }
                /* Always switch back to main area after RPMB access */
                if (rpmb_ioctl)
                        mmc_blk_part_switch(card, 0);
                break;
        case MMC_DRV_OP_BOOT_WP:
                ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
                                 card->ext_csd.boot_ro_lock |
                                 EXT_CSD_BOOT_WP_B_PWR_WP_EN,
                                 card->ext_csd.part_time);
                if (ret)
                        pr_err("%s: Locking boot partition ro until next power on failed: %d\n",
                               md->disk->disk_name, ret);
                else
                        card->ext_csd.boot_ro_lock |=
                                EXT_CSD_BOOT_WP_B_PWR_WP_EN;
                break;
        case MMC_DRV_OP_GET_CARD_STATUS:
                ret = mmc_send_status(card, &status);
                if (!ret)
                        ret = status;
                break;
        case MMC_DRV_OP_GET_EXT_CSD:
                ext_csd = mq_rq->drv_op_data;
                ret = mmc_get_ext_csd(card, ext_csd);
                break;
        default:
                pr_err("%s: unknown driver specific operation\n",
                       md->disk->disk_name);
                ret = -EINVAL;
                break;
        }
        mq_rq->drv_op_result = ret;
        blk_end_request_all(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}

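/* Handle a discard request with the best erase argument the card supports. */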
static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
        struct mmc_blk_data *md = mq->blkdata;
        struct mmc_card *card = md->queue.card;
        unsigned int from, nr, arg;
        int err = 0, type = MMC_BLK_DISCARD;
        blk_status_t status = BLK_STS_OK;

        if (!mmc_can_erase(card)) {
                status = BLK_STS_NOTSUPP;
                goto fail;
        }

        from = blk_rq_pos(req);
        nr = blk_rq_sectors(req);

        if (mmc_can_discard(card))
                arg = MMC_DISCARD_ARG;
        else if (mmc_can_trim(card))
                arg = MMC_TRIM_ARG;
        else
                arg = MMC_ERASE_ARG;
        do {
                err = 0;
                if (card->quirks & MMC_QUIRK_INAND_CMD38) {
                        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                         INAND_CMD38_ARG_EXT_CSD,
                                         arg == MMC_TRIM_ARG ?
                                         INAND_CMD38_ARG_TRIM :
                                         INAND_CMD38_ARG_ERASE,
                                         0);
                }
                if (!err)
                        err = mmc_erase(card, from, nr, arg);
        } while (err == -EIO && !mmc_blk_reset(md, card->host, type));
        if (err)
                status = BLK_STS_IOERR;
        else
                mmc_blk_reset_success(md, type);
fail:
        blk_end_request(req, status, blk_rq_bytes(req));
}

static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
                                       struct request *req)
{
        struct mmc_blk_data *md = mq->blkdata;
        struct mmc_card *card = md->queue.card;
        unsigned int from, nr, arg;
        int err = 0, type = MMC_BLK_SECDISCARD;
        blk_status_t status = BLK_STS_OK;

        if (!(mmc_can_secure_erase_trim(card))) {
                status = BLK_STS_NOTSUPP;
                goto out;
        }

        from = blk_rq_pos(req);
        nr = blk_rq_sectors(req);

        if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
                arg = MMC_SECURE_TRIM1_ARG;
        else
                arg = MMC_SECURE_ERASE_ARG;

retry:
        if (card->quirks & MMC_QUIRK_INAND_CMD38) {
                err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                 INAND_CMD38_ARG_EXT_CSD,
                                 arg == MMC_SECURE_TRIM1_ARG ?
                                 INAND_CMD38_ARG_SECTRIM1 :
                                 INAND_CMD38_ARG_SECERASE,
                                 0);
                if (err)
                        goto out_retry;
        }

        err = mmc_erase(card, from, nr, arg);
        if (err == -EIO)
                goto out_retry;
        if (err) {
                status = BLK_STS_IOERR;
                goto out;
        }

        if (arg == MMC_SECURE_TRIM1_ARG) {
                if (card->quirks & MMC_QUIRK_INAND_CMD38) {
                        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                         INAND_CMD38_ARG_EXT_CSD,
                                         INAND_CMD38_ARG_SECTRIM2,
                                         0);
                        if (err)
                                goto out_retry;
                }

                err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
                if (err == -EIO)
                        goto out_retry;
                if (err) {
                        status = BLK_STS_IOERR;
                        goto out;
                }
        }

out_retry:
        if (err && !mmc_blk_reset(md, card->host, type))
                goto retry;
        if (!err)
                mmc_blk_reset_success(md, type);
out:
        blk_end_request(req, status, blk_rq_bytes(req));
}

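/* Handle a flush request by flushing the card's internal cache. */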
static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
        struct mmc_blk_data *md = mq->blkdata;
        struct mmc_card *card = md->queue.card;
        int ret = 0;

        ret = mmc_flush_cache(card);
        blk_end_request_all(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}

/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
                                    struct mmc_card *card,
                                    struct request *req)
{
        if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
                /* Legacy mode imposes restrictions on transfers. */
                if (!IS_ALIGNED(blk_rq_pos(req), card->ext_csd.rel_sectors))
                        brq->data.blocks = 1;

                if (brq->data.blocks > card->ext_csd.rel_sectors)
                        brq->data.blocks = card->ext_csd.rel_sectors;
                else if (brq->data.blocks < card->ext_csd.rel_sectors)
                        brq->data.blocks = 1;
        }
}

#define CMD_ERRORS                                                      \
        (R1_OUT_OF_RANGE |      /* Command argument out of range */     \
         R1_ADDRESS_ERROR |     /* Misaligned address */                \
         R1_BLOCK_LEN_ERROR |   /* Transferred block length incorrect */\
         R1_WP_VIOLATION |      /* Tried to write to protected block */ \
         R1_CARD_ECC_FAILED |   /* Card ECC failed */                   \
         R1_CC_ERROR |          /* Card controller error */             \
         R1_ERROR)              /* General/unknown error */

static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
{
        u32 val;

        /*
         * Per the SD specification (physical layer version 4.10) [1],
         * section 4.3.3, "When the last block of user area is read using
         * CMD18, the host should ignore OUT_OF_RANGE error that may
         * occur even the sequence is correct". JESD84-B51 for eMMC has
         * a similar statement in section 6.8.3.
         *
         * Multiple block read/write can be done either by the predefined
         * method, namely CMD23, or in open-ended mode. For open-ended
         * mode, we should ignore the OUT_OF_RANGE error as it's normal
         * behaviour.
         *
         * However, the spec [1] doesn't tell us whether we should also
         * ignore it for the predefined method. But per the spec [1],
         * section 4.15 Set Block Count Command, it says "If illegal block
         * count is set, out of range error will be indicated during
         * read/write operation (For example, data transfer is stopped at
         * user area boundary)." In other words, we can expect an out of
         * range error in the response to the following CMD18/25. And if
         * the argument of CMD23 + the argument of CMD18/25 exceed the max
         * number of blocks, we can also expect -ETIMEDOUT or another
         * error from the host driver due to a missing data response (for
         * writes) or data (for reads), as the card will stop the data
         * transfer by itself per the spec. So we only need to check
         * R1_OUT_OF_RANGE for open-ended mode.
         */

        if (!brq->stop.error) {
                bool oor_with_open_end;
                /* If there is no error yet, check R1 response */

                val = brq->stop.resp[0] & CMD_ERRORS;
                oor_with_open_end = val & R1_OUT_OF_RANGE && !brq->mrq.sbc;

                if (val && !oor_with_open_end)
                        brq->stop.error = -EIO;
        }
}

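/*
 * Check the result of a completed read/write request and classify it
 * as success, a retryable error, or a fatal error.
 */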
static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
                                             struct mmc_async_req *areq)
{
        struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
                                                    areq);
        struct mmc_blk_request *brq = &mq_mrq->brq;
        struct request *req = mmc_queue_req_to_req(mq_mrq);
        int need_retune = card->host->need_retune;
        bool ecc_err = false;
        bool gen_err = false;

        /*
         * sbc.error indicates a problem with the set block count
         * command.  No data will have been transferred.
         *
         * cmd.error indicates a problem with the r/w command.  No
         * data will have been transferred.
         *
         * stop.error indicates a problem with the stop command.  Data
         * may have been transferred, or may still be transferring.
         */

        mmc_blk_eval_resp_error(brq);

        if (brq->sbc.error || brq->cmd.error ||
            brq->stop.error || brq->data.error) {
                switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
                case ERR_RETRY:
                        return MMC_BLK_RETRY;
                case ERR_ABORT:
                        return MMC_BLK_ABORT;
                case ERR_NOMEDIUM:
                        return MMC_BLK_NOMEDIUM;
                case ERR_CONTINUE:
                        break;
                }
        }

        /*
         * Check for errors relating to the execution of the
         * initial command - such as address errors.  No data
         * has been transferred.
         */
        if (brq->cmd.resp[0] & CMD_ERRORS) {
                pr_err("%s: r/w command failed, status = %#x\n",
                       req->rq_disk->disk_name, brq->cmd.resp[0]);
                return MMC_BLK_ABORT;
        }

1533         /*
1534          * Everything else is either success, or a data error of some
1535          * kind.  If it was a write, we may have transitioned to
1536          * program mode, and we have to wait for it to complete.
1537          */
1538         if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
1539                 int err;
1540
1541                 /* Check stop command response */
1542                 if (brq->stop.resp[0] & R1_ERROR) {
1543                         pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
1544                                req->rq_disk->disk_name, __func__,
1545                                brq->stop.resp[0]);
1546                         gen_err = true;
1547                 }
1548
1549                 err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
1550                                         &gen_err);
1551                 if (err)
1552                         return MMC_BLK_CMD_ERR;
1553         }
1554
1555         /* If a general error occurs, retry the write operation. */
1556         if (gen_err) {
1557                 pr_warn("%s: retrying write for general error\n",
1558                                 req->rq_disk->disk_name);
1559                 return MMC_BLK_RETRY;
1560         }
1561
1562         /* Some errors (ECC) are flagged on the next command, so check stop, too */
1563         if (brq->data.error || brq->stop.error) {
1564                 if (need_retune && !brq->retune_retry_done) {
1565                         pr_debug("%s: retrying because a re-tune was needed\n",
1566                                  req->rq_disk->disk_name);
1567                         brq->retune_retry_done = 1;
1568                         return MMC_BLK_RETRY;
1569                 }
1570                 pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
1571                        req->rq_disk->disk_name, brq->data.error ?: brq->stop.error,
1572                        (unsigned)blk_rq_pos(req),
1573                        (unsigned)blk_rq_sectors(req),
1574                        brq->cmd.resp[0], brq->stop.resp[0]);
1575
1576                 if (rq_data_dir(req) == READ) {
1577                         if (ecc_err)
1578                                 return MMC_BLK_ECC_ERR;
1579                         return MMC_BLK_DATA_ERR;
1580                 } else {
1581                         return MMC_BLK_CMD_ERR;
1582                 }
1583         }
1584
1585         if (!brq->data.bytes_xfered)
1586                 return MMC_BLK_RETRY;
1587
1588         if (blk_rq_bytes(req) != brq->data.bytes_xfered)
1589                 return MMC_BLK_PARTIAL;
1590
1591         return MMC_BLK_SUCCESS;
1592 }
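
/*
 * Recap of the checks above, for orientation: transport errors are
 * routed through mmc_blk_cmd_recovery(); CMD_ERRORS in the command
 * response aborts; a failed busy-wait after a write yields
 * MMC_BLK_CMD_ERR; a general error retries; data/stop errors retry
 * once after a re-tune, then map to ECC/DATA errors for reads or
 * CMD_ERR for writes; zero bytes transferred retries, and a short
 * transfer is reported as MMC_BLK_PARTIAL.
 */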
1593
1594 static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
1595                               int disable_multi, bool *do_rel_wr_p,
1596                               bool *do_data_tag_p)
1597 {
1598         struct mmc_blk_data *md = mq->blkdata;
1599         struct mmc_card *card = md->queue.card;
1600         struct mmc_blk_request *brq = &mqrq->brq;
1601         struct request *req = mmc_queue_req_to_req(mqrq);
1602         bool do_rel_wr, do_data_tag;
1603
1604         /*
1605          * Reliable writes are used to implement Forced Unit Access and
1606          * are supported only on MMCs.
1607          */
1608         do_rel_wr = (req->cmd_flags & REQ_FUA) &&
1609                     rq_data_dir(req) == WRITE &&
1610                     (md->flags & MMC_BLK_REL_WR);
1611
1612         memset(brq, 0, sizeof(struct mmc_blk_request));
1613
1614         brq->mrq.data = &brq->data;
1615         brq->mrq.tag = req->tag;
1616
1617         brq->stop.opcode = MMC_STOP_TRANSMISSION;
1618         brq->stop.arg = 0;
1619
1620         if (rq_data_dir(req) == READ) {
1621                 brq->data.flags = MMC_DATA_READ;
1622                 brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1623         } else {
1624                 brq->data.flags = MMC_DATA_WRITE;
1625                 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1626         }
1627
1628         brq->data.blksz = 512;
1629         brq->data.blocks = blk_rq_sectors(req);
1630         brq->data.blk_addr = blk_rq_pos(req);
1631
1632         /*
1633          * The command queue supports 2 priorities: "high" (1) and "simple" (0).
1634          * The eMMC will give "high" priority tasks priority over "simple"
1635          * priority tasks. Here we always set "simple" priority by not setting
1636          * MMC_DATA_PRIO.
1637          */
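        /*
         * Illustrative only: a caller wanting "high" priority would set
         * brq->data.flags |= MMC_DATA_PRIO instead; this driver
         * deliberately leaves the flag clear.
         */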
1638
1639         /*
1640          * The block layer doesn't support all sector count
1641          * restrictions, so we need to be prepared for too big
1642          * requests.
1643          */
1644         if (brq->data.blocks > card->host->max_blk_count)
1645                 brq->data.blocks = card->host->max_blk_count;
1646
1647         if (brq->data.blocks > 1) {
1648                 /*
1649                  * After a read error, we redo the request one sector
1650                  * at a time in order to accurately determine which
1651                  * sectors can be read successfully.
1652                  */
1653                 if (disable_multi)
1654                         brq->data.blocks = 1;
1655
1656                 /*
1657                  * Some controllers have HW issues while operating
1658                  * in multiple I/O mode
1659                  */
1660                 if (card->host->ops->multi_io_quirk)
1661                         brq->data.blocks = card->host->ops->multi_io_quirk(card,
1662                                                 (rq_data_dir(req) == READ) ?
1663                                                 MMC_DATA_READ : MMC_DATA_WRITE,
1664                                                 brq->data.blocks);
1665         }
1666
1667         if (do_rel_wr) {
1668                 mmc_apply_rel_rw(brq, card, req);
1669                 brq->data.flags |= MMC_DATA_REL_WR;
1670         }
1671
1672         /*
1673          * Data tag is used only during writing meta data to speed
1674          * up write and any subsequent read of this meta data
1675          */
1676         do_data_tag = card->ext_csd.data_tag_unit_size &&
1677                       (req->cmd_flags & REQ_META) &&
1678                       (rq_data_dir(req) == WRITE) &&
1679                       ((brq->data.blocks * brq->data.blksz) >=
1680                        card->ext_csd.data_tag_unit_size);
1681
1682         if (do_data_tag)
1683                 brq->data.flags |= MMC_DATA_DAT_TAG;
1684
1685         mmc_set_data_timeout(&brq->data, card);
1686
1687         brq->data.sg = mqrq->sg;
1688         brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
1689
1690         /*
1691          * Adjust the sg list so it is the same size as the
1692          * request.
1693          */
1694         if (brq->data.blocks != blk_rq_sectors(req)) {
1695                 int i, data_size = brq->data.blocks << 9;
1696                 struct scatterlist *sg;
1697
1698                 for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
1699                         data_size -= sg->length;
1700                         if (data_size <= 0) {
1701                                 sg->length += data_size;
1702                                 i++;
1703                                 break;
1704                         }
1705                 }
1706                 brq->data.sg_len = i;
1707         }
1708
1709         mqrq->areq.mrq = &brq->mrq;
1710
1711         if (do_rel_wr_p)
1712                 *do_rel_wr_p = do_rel_wr;
1713
1714         if (do_data_tag_p)
1715                 *do_data_tag_p = do_data_tag;
1716 }
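
/*
 * Illustrative sketch, kept out of the build: the scatterlist trim
 * above, restated over a plain array of chunk lengths. The helper and
 * its parameters are hypothetical.
 */
#if 0
static unsigned int mmc_blk_trim_chunks(unsigned int *len,
                                        unsigned int nents, int data_size)
{
        unsigned int i;

        for (i = 0; i < nents; i++) {
                data_size -= len[i];
                if (data_size <= 0) {
                        /* shrink the final chunk by the overshoot */
                        len[i] += data_size;
                        i++;
                        break;
                }
        }
        return i;       /* the new number of chunks */
}
#endif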
1717
1718 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
1719                                struct mmc_card *card,
1720                                int disable_multi,
1721                                struct mmc_queue *mq)
1722 {
1723         u32 readcmd, writecmd;
1724         struct mmc_blk_request *brq = &mqrq->brq;
1725         struct request *req = mmc_queue_req_to_req(mqrq);
1726         struct mmc_blk_data *md = mq->blkdata;
1727         bool do_rel_wr, do_data_tag;
1728
1729         mmc_blk_data_prep(mq, mqrq, disable_multi, &do_rel_wr, &do_data_tag);
1730
1731         brq->mrq.cmd = &brq->cmd;
1732
1733         brq->cmd.arg = blk_rq_pos(req);
1734         if (!mmc_card_blockaddr(card))
1735                 brq->cmd.arg <<= 9;
1736         brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
1737
1738         if (brq->data.blocks > 1 || do_rel_wr) {
1739                 /* SPI multiblock writes terminate using a special
1740                  * token, not a STOP_TRANSMISSION request.
1741                  */
1742                 if (!mmc_host_is_spi(card->host) ||
1743                     rq_data_dir(req) == READ)
1744                         brq->mrq.stop = &brq->stop;
1745                 readcmd = MMC_READ_MULTIPLE_BLOCK;
1746                 writecmd = MMC_WRITE_MULTIPLE_BLOCK;
1747         } else {
1748                 brq->mrq.stop = NULL;
1749                 readcmd = MMC_READ_SINGLE_BLOCK;
1750                 writecmd = MMC_WRITE_BLOCK;
1751         }
1752         brq->cmd.opcode = rq_data_dir(req) == READ ? readcmd : writecmd;
1753
1754         /*
1755          * Pre-defined multi-block transfers are preferable to
1756          * open-ended ones (and necessary for reliable writes).
1757          * However, it is not sufficient to just send CMD23,
1758          * and avoid the final CMD12, as on an error condition
1759          * CMD12 (stop) needs to be sent anyway. This, coupled
1760          * with Auto-CMD23 enhancements provided by some
1761          * hosts, means that the complexity of dealing
1762          * with this is best left to the host. If CMD23 is
1763          * supported by card and host, we'll fill sbc in and let
1764          * the host deal with handling it correctly. This means
1765          * that for hosts that don't expose MMC_CAP_CMD23, no
1766          * change of behavior will be observed.
1767          *
1768          * N.B.: Some MMC cards experience performance degradation
1769          * with CMD23. We'll avoid CMD23-bounded multiblock writes
1770          * for these, while retaining features like reliable writes.
1771          */
1772         if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
1773             (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
1774              do_data_tag)) {
1775                 brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
1776                 brq->sbc.arg = brq->data.blocks |
1777                         (do_rel_wr ? (1 << 31) : 0) |
1778                         (do_data_tag ? (1 << 29) : 0);
1779                 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
1780                 brq->mrq.sbc = &brq->sbc;
1781         }
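        /*
         * For reference, the CMD23 argument built above packs the block
         * count into the low 16 bits, with bit 31 requesting a reliable
         * write and bit 29 requesting a data tag; e.g. a 64-block
         * reliable write yields sbc.arg == 0x80000040.
         */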
1782
1783         mqrq->areq.err_check = mmc_blk_err_check;
1784 }
1785
1786 static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
1787                                struct mmc_blk_request *brq, struct request *req,
1788                                bool old_req_pending)
1789 {
1790         bool req_pending;
1791
1792         /*
1793          * If this is an SD card and we're writing, we can first
1794          * mark the known good sectors as ok.
1795          *
1796          * If the card is not SD, we can still ok written sectors
1797          * as reported by the controller (which might be less than
1798          * the real number of written sectors, but never more).
1799          */
1800         if (mmc_card_sd(card)) {
1801                 u32 blocks;
1802                 int err;
1803
1804                 err = mmc_sd_num_wr_blocks(card, &blocks);
1805                 if (err)
1806                         req_pending = old_req_pending;
1807                 else
1808                         req_pending = blk_end_request(req, BLK_STS_OK, blocks << 9);
1809         } else {
1810                 req_pending = blk_end_request(req, BLK_STS_OK, brq->data.bytes_xfered);
1811         }
1812         return req_pending;
1813 }
1814
1815 static void mmc_blk_rw_cmd_abort(struct mmc_queue *mq, struct mmc_card *card,
1816                                  struct request *req,
1817                                  struct mmc_queue_req *mqrq)
1818 {
1819         if (mmc_card_removed(card))
1820                 req->rq_flags |= RQF_QUIET;
1821         while (blk_end_request(req, BLK_STS_IOERR, blk_rq_cur_bytes(req)));
1822         mq->qcnt--;
1823 }
1824
1825 /**
1826  * mmc_blk_rw_try_restart() - tries to restart the current async request
1827  * @mq: the queue with the card and host to restart
1828  * @req: a new request that wants to be started after the current one
1829  */
1830 static void mmc_blk_rw_try_restart(struct mmc_queue *mq, struct request *req,
1831                                    struct mmc_queue_req *mqrq)
1832 {
1833         if (!req)
1834                 return;
1835
1836         /*
1837          * If the card was removed, just cancel everything and return.
1838          */
1839         if (mmc_card_removed(mq->card)) {
1840                 req->rq_flags |= RQF_QUIET;
1841                 blk_end_request_all(req, BLK_STS_IOERR);
1842                 mq->qcnt--; /* FIXME: just set to 0? */
1843                 return;
1844         }
1845         /* Else proceed and try to restart the current async request */
1846         mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
1847         mmc_start_areq(mq->card->host, &mqrq->areq, NULL);
1848 }
1849
1850 static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
1851 {
1852         struct mmc_blk_data *md = mq->blkdata;
1853         struct mmc_card *card = md->queue.card;
1854         struct mmc_blk_request *brq;
1855         int disable_multi = 0, retry = 0, type, retune_retry_done = 0;
1856         enum mmc_blk_status status;
1857         struct mmc_queue_req *mqrq_cur = NULL;
1858         struct mmc_queue_req *mq_rq;
1859         struct request *old_req;
1860         struct mmc_async_req *new_areq;
1861         struct mmc_async_req *old_areq;
1862         bool req_pending = true;
1863
1864         if (new_req) {
1865                 mqrq_cur = req_to_mmc_queue_req(new_req);
1866                 mq->qcnt++;
1867         }
1868
1869         if (!mq->qcnt)
1870                 return;
1871
1872         do {
1873                 if (new_req) {
1874                         /*
1875                          * When the 4KB native sector size is enabled, only
1876                          * multiples of 8 blocks may be read or written.
1877                          */
1878                         if (mmc_large_sector(card) &&
1879                                 !IS_ALIGNED(blk_rq_sectors(new_req), 8)) {
1880                                 pr_err("%s: Transfer size is not 4KB sector size aligned\n",
1881                                         new_req->rq_disk->disk_name);
1882                                 mmc_blk_rw_cmd_abort(mq, card, new_req, mqrq_cur);
1883                                 return;
1884                         }
1885
1886                         mmc_blk_rw_rq_prep(mqrq_cur, card, 0, mq);
1887                         new_areq = &mqrq_cur->areq;
1888                 } else
1889                         new_areq = NULL;
1890
1891                 old_areq = mmc_start_areq(card->host, new_areq, &status);
1892                 if (!old_areq) {
1893                         /*
1894                          * We have just put the first request into the pipeline
1895                          * and there is nothing more to do until it is
1896                          * complete.
1897                          */
1898                         return;
1899                 }
1900
1901                 /*
1902                  * An asynchronous request has been completed and we proceed
1903                  * to handle the result of it.
1904                  */
1905                 mq_rq = container_of(old_areq, struct mmc_queue_req, areq);
1906                 brq = &mq_rq->brq;
1907                 old_req = mmc_queue_req_to_req(mq_rq);
1908                 type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
1909
1910                 switch (status) {
1911                 case MMC_BLK_SUCCESS:
1912                 case MMC_BLK_PARTIAL:
1913                         /*
1914                          * A block was successfully transferred.
1915                          */
1916                         mmc_blk_reset_success(md, type);
1917
1918                         req_pending = blk_end_request(old_req, BLK_STS_OK,
1919                                                       brq->data.bytes_xfered);
1920                         /*
1921                          * If the blk_end_request function returns non-zero even
1922                          * though all data has been transferred and no errors
1923                          * were returned by the host controller, it's a bug.
1924                          */
1925                         if (status == MMC_BLK_SUCCESS && req_pending) {
1926                                 pr_err("%s BUG rq_tot %d d_xfer %d\n",
1927                                        __func__, blk_rq_bytes(old_req),
1928                                        brq->data.bytes_xfered);
1929                                 mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
1930                                 return;
1931                         }
1932                         break;
1933                 case MMC_BLK_CMD_ERR:
1934                         req_pending = mmc_blk_rw_cmd_err(md, card, brq, old_req, req_pending);
1935                         if (mmc_blk_reset(md, card->host, type)) {
1936                                 if (req_pending)
1937                                         mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
1938                                 else
1939                                         mq->qcnt--;
1940                                 mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
1941                                 return;
1942                         }
1943                         if (!req_pending) {
1944                                 mq->qcnt--;
1945                                 mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
1946                                 return;
1947                         }
1948                         break;
1949                 case MMC_BLK_RETRY:
1950                         retune_retry_done = brq->retune_retry_done;
1951                         if (retry++ < 5)
1952                                 break;
1953                         /* Fall through */
1954                 case MMC_BLK_ABORT:
1955                         if (!mmc_blk_reset(md, card->host, type))
1956                                 break;
1957                         mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
1958                         mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
1959                         return;
1960                 case MMC_BLK_DATA_ERR: {
1961                         int err;
1962
1963                         err = mmc_blk_reset(md, card->host, type);
1964                         if (!err)
1965                                 break;
1966                         if (err == -ENODEV) {
1967                                 mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
1968                                 mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
1969                                 return;
1970                         }
1971                         /* Fall through */
1972                 }
1973                 case MMC_BLK_ECC_ERR:
1974                         if (brq->data.blocks > 1) {
1975                                 /* Redo read one sector at a time */
1976                                 pr_warn("%s: retrying using single block read\n",
1977                                         old_req->rq_disk->disk_name);
1978                                 disable_multi = 1;
1979                                 break;
1980                         }
1981                         /*
1982                          * After an error, we redo I/O one sector at a
1983                          * time, so we only reach here after trying to
1984                          * read a single sector.
1985                          */
1986                         req_pending = blk_end_request(old_req, BLK_STS_IOERR,
1987                                                       brq->data.blksz);
1988                         if (!req_pending) {
1989                                 mq->qcnt--;
1990                                 mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
1991                                 return;
1992                         }
1993                         break;
1994                 case MMC_BLK_NOMEDIUM:
1995                         mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
1996                         mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
1997                         return;
1998                 default:
1999                         pr_err("%s: Unhandled return value (%d)\n",
2000                                         old_req->rq_disk->disk_name, status);
2001                         mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
2002                         mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
2003                         return;
2004                 }
2005
2006                 if (req_pending) {
2007                         /*
2008                          * In case of an incomplete request
2009                          * prepare it again and resend.
2010                          */
2011                         mmc_blk_rw_rq_prep(mq_rq, card,
2012                                         disable_multi, mq);
2013                         mmc_start_areq(card->host,
2014                                         &mq_rq->areq, NULL);
2015                         mq_rq->brq.retune_retry_done = retune_retry_done;
2016                 }
2017         } while (req_pending);
2018
2019         mq->qcnt--;
2020 }
2021
2022 void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
2023 {
2024         int ret;
2025         struct mmc_blk_data *md = mq->blkdata;
2026         struct mmc_card *card = md->queue.card;
2027
2028         if (req && !mq->qcnt)
2029                 /* claim host only for the first request */
2030                 mmc_get_card(card, NULL);
2031
2032         ret = mmc_blk_part_switch(card, md->part_type);
2033         if (ret) {
2034                 if (req) {
2035                         blk_end_request_all(req, BLK_STS_IOERR);
2036                 }
2037                 goto out;
2038         }
2039
2040         if (req) {
2041                 switch (req_op(req)) {
2042                 case REQ_OP_DRV_IN:
2043                 case REQ_OP_DRV_OUT:
2044                         /*
2045                          * Complete ongoing async transfer before issuing
2046                          * ioctl()s
2047                          */
2048                         if (mq->qcnt)
2049                                 mmc_blk_issue_rw_rq(mq, NULL);
2050                         mmc_blk_issue_drv_op(mq, req);
2051                         break;
2052                 case REQ_OP_DISCARD:
2053                         /*
2054                          * Complete ongoing async transfer before issuing
2055                          * discard.
2056                          */
2057                         if (mq->qcnt)
2058                                 mmc_blk_issue_rw_rq(mq, NULL);
2059                         mmc_blk_issue_discard_rq(mq, req);
2060                         break;
2061                 case REQ_OP_SECURE_ERASE:
2062                         /*
2063                          * Complete ongoing async transfer before issuing
2064                          * secure erase.
2065                          */
2066                         if (mq->qcnt)
2067                                 mmc_blk_issue_rw_rq(mq, NULL);
2068                         mmc_blk_issue_secdiscard_rq(mq, req);
2069                         break;
2070                 case REQ_OP_FLUSH:
2071                         /*
2072                          * Complete ongoing async transfer before issuing
2073                          * flush.
2074                          */
2075                         if (mq->qcnt)
2076                                 mmc_blk_issue_rw_rq(mq, NULL);
2077                         mmc_blk_issue_flush(mq, req);
2078                         break;
2079                 default:
2080                         /* Normal request, just issue it */
2081                         mmc_blk_issue_rw_rq(mq, req);
2082                         card->host->context_info.is_waiting_last_req = false;
2083                         break;
2084                 }
2085         } else {
2086                 /* No request, flushing the pipeline with NULL */
2087                 mmc_blk_issue_rw_rq(mq, NULL);
2088                 card->host->context_info.is_waiting_last_req = false;
2089         }
2090
2091 out:
2092         if (!mq->qcnt)
2093                 mmc_put_card(card, NULL);
2094 }
2095
2096 static inline int mmc_blk_readonly(struct mmc_card *card)
2097 {
2098         return mmc_card_readonly(card) ||
2099                !(card->csd.cmdclass & CCC_BLOCK_WRITE);
2100 }
2101
2102 static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
2103                                               struct device *parent,
2104                                               sector_t size,
2105                                               bool default_ro,
2106                                               const char *subname,
2107                                               int area_type)
2108 {
2109         struct mmc_blk_data *md;
2110         int devidx, ret;
2111
2112         devidx = ida_simple_get(&mmc_blk_ida, 0, max_devices, GFP_KERNEL);
2113         if (devidx < 0) {
2114                 /*
2115                  * We get -ENOSPC when there are no more available device
2116                  * indexes. Either userspace hasn't yet unmounted the
2117                  * partitions, which postpones mmc_blk_release() from
2118                  * being called, or the device has more partitions than
2119                  * we support.
2120                  */
2121                 if (devidx == -ENOSPC)
2122                         dev_err(mmc_dev(card->host),
2123                                 "no more device IDs available\n");
2124
2125                 return ERR_PTR(devidx);
2126         }
2127
2128         md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
2129         if (!md) {
2130                 ret = -ENOMEM;
2131                 goto out;
2132         }
2133
2134         md->area_type = area_type;
2135
2136         /*
2137          * Set the read-only status based on the supported commands
2138          * and the write protect switch.
2139          */
2140         md->read_only = mmc_blk_readonly(card);
2141
2142         md->disk = alloc_disk(perdev_minors);
2143         if (md->disk == NULL) {
2144                 ret = -ENOMEM;
2145                 goto err_kfree;
2146         }
2147
2148         spin_lock_init(&md->lock);
2149         INIT_LIST_HEAD(&md->part);
2150         INIT_LIST_HEAD(&md->rpmbs);
2151         md->usage = 1;
2152
2153         ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
2154         if (ret)
2155                 goto err_putdisk;
2156
2157         md->queue.blkdata = md;
2158
2159         md->disk->major = MMC_BLOCK_MAJOR;
2160         md->disk->first_minor = devidx * perdev_minors;
2161         md->disk->fops = &mmc_bdops;
2162         md->disk->private_data = md;
2163         md->disk->queue = md->queue.queue;
2164         md->parent = parent;
2165         set_disk_ro(md->disk, md->read_only || default_ro);
2166         md->disk->flags = GENHD_FL_EXT_DEVT;
2167         if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
2168                 md->disk->flags |= GENHD_FL_NO_PART_SCAN;
2169
2170         /*
2171          * As discussed on lkml, GENHD_FL_REMOVABLE should:
2172          *
2173          * - be set for removable media with permanent block devices
2174          * - be unset for removable block devices with permanent media
2175          *
2176          * Since MMC block devices clearly fall under the second
2177          * case, we do not set GENHD_FL_REMOVABLE.  Userspace
2178          * should use the block device creation/destruction hotplug
2179          * messages to tell when the card is present.
2180          */
2181
2182         snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
2183                  "mmcblk%u%s", card->host->index, subname ? subname : "");
2184
2185         if (mmc_card_mmc(card))
2186                 blk_queue_logical_block_size(md->queue.queue,
2187                                              card->ext_csd.data_sector_size);
2188         else
2189                 blk_queue_logical_block_size(md->queue.queue, 512);
2190
2191         set_capacity(md->disk, size);
2192
2193         if (mmc_host_cmd23(card->host)) {
2194                 if ((mmc_card_mmc(card) &&
2195                      card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
2196                     (mmc_card_sd(card) &&
2197                      card->scr.cmds & SD_SCR_CMD23_SUPPORT))
2198                         md->flags |= MMC_BLK_CMD23;
2199         }
2200
2201         if (mmc_card_mmc(card) &&
2202             md->flags & MMC_BLK_CMD23 &&
2203             ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
2204              card->ext_csd.rel_sectors)) {
2205                 md->flags |= MMC_BLK_REL_WR;
2206                 blk_queue_write_cache(md->queue.queue, true, true);
2207         }
2208
2209         return md;
2210
2211  err_putdisk:
2212         put_disk(md->disk);
2213  err_kfree:
2214         kfree(md);
2215  out:
2216         ida_simple_remove(&mmc_blk_ida, devidx);
2217         return ERR_PTR(ret);
2218 }
2219
2220 static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
2221 {
2222         sector_t size;
2223
2224         if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
2225                 /*
2226                  * The EXT_CSD sector count is in number of 512 byte
2227                  * sectors.
2228                  */
2229                 size = card->ext_csd.sectors;
2230         } else {
2231                 /*
2232                  * The CSD capacity field is in units of 2^read_blkbits
2233                  * bytes; set_capacity() takes units of 512 bytes.
2234                  */
2235                 size = (typeof(sector_t))card->csd.capacity
2236                         << (card->csd.read_blkbits - 9);
2237         }
2238
2239         return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
2240                                         MMC_BLK_DATA_AREA_MAIN);
2241 }
2242
2243 static int mmc_blk_alloc_part(struct mmc_card *card,
2244                               struct mmc_blk_data *md,
2245                               unsigned int part_type,
2246                               sector_t size,
2247                               bool default_ro,
2248                               const char *subname,
2249                               int area_type)
2250 {
2251         char cap_str[10];
2252         struct mmc_blk_data *part_md;
2253
2254         part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
2255                                     subname, area_type);
2256         if (IS_ERR(part_md))
2257                 return PTR_ERR(part_md);
2258         part_md->part_type = part_type;
2259         list_add(&part_md->part, &md->part);
2260
2261         string_get_size((u64)get_capacity(part_md->disk), 512, STRING_UNITS_2,
2262                         cap_str, sizeof(cap_str));
2263         pr_info("%s: %s %s partition %u %s\n",
2264                part_md->disk->disk_name, mmc_card_id(card),
2265                mmc_card_name(card), part_md->part_type, cap_str);
2266         return 0;
2267 }
2268
2269 /**
2270  * mmc_rpmb_ioctl() - ioctl handler for the RPMB chardev
2271  * @filp: the character device file
2272  * @cmd: the ioctl() command
2273  * @arg: the argument from userspace
2274  *
2275  * This essentially just redirects the incoming ioctl()s to the
2276  * main block device that spawned the RPMB character device.
2277  */
2278 static long mmc_rpmb_ioctl(struct file *filp, unsigned int cmd,
2279                            unsigned long arg)
2280 {
2281         struct mmc_rpmb_data *rpmb = filp->private_data;
2282         int ret;
2283
2284         switch (cmd) {
2285         case MMC_IOC_CMD:
2286                 ret = mmc_blk_ioctl_cmd(rpmb->md,
2287                                         (struct mmc_ioc_cmd __user *)arg,
2288                                         rpmb);
2289                 break;
2290         case MMC_IOC_MULTI_CMD:
2291                 ret = mmc_blk_ioctl_multi_cmd(rpmb->md,
2292                                         (struct mmc_ioc_multi_cmd __user *)arg,
2293                                         rpmb);
2294                 break;
2295         default:
2296                 ret = -EINVAL;
2297                 break;
2298         }
2299
2300         return ret;
2301 }
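
/*
 * Illustrative userspace sketch, not part of the driver and not built:
 * issuing MMC_IOC_CMD against the RPMB character device. The device
 * path is an example and error handling is elided.
 */
#if 0
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/mmc/ioctl.h>

int rpmb_ioctl_example(void)
{
        struct mmc_ioc_cmd idata = {};
        int fd = open("/dev/mmcblk0rpmb", O_RDWR);

        if (fd < 0)
                return -1;
        /* ... fill in idata.opcode, idata.arg and the data pointer ... */
        return ioctl(fd, MMC_IOC_CMD, &idata);
}
#endif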
2302
2303 #ifdef CONFIG_COMPAT
2304 static long mmc_rpmb_ioctl_compat(struct file *filp, unsigned int cmd,
2305                               unsigned long arg)
2306 {
2307         return mmc_rpmb_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
2308 }
2309 #endif
2310
2311 static int mmc_rpmb_chrdev_open(struct inode *inode, struct file *filp)
2312 {
2313         struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
2314                                                   struct mmc_rpmb_data, chrdev);
2315
2316         get_device(&rpmb->dev);
2317         filp->private_data = rpmb;
2318         mmc_blk_get(rpmb->md->disk);
2319
2320         return nonseekable_open(inode, filp);
2321 }
2322
2323 static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp)
2324 {
2325         struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
2326                                                   struct mmc_rpmb_data, chrdev);
2327
2328         put_device(&rpmb->dev);
2329         mmc_blk_put(rpmb->md);
2330
2331         return 0;
2332 }
2333
2334 static const struct file_operations mmc_rpmb_fileops = {
2335         .release = mmc_rpmb_chrdev_release,
2336         .open = mmc_rpmb_chrdev_open,
2337         .owner = THIS_MODULE,
2338         .llseek = no_llseek,
2339         .unlocked_ioctl = mmc_rpmb_ioctl,
2340 #ifdef CONFIG_COMPAT
2341         .compat_ioctl = mmc_rpmb_ioctl_compat,
2342 #endif
2343 };
2344
2345 static void mmc_blk_rpmb_device_release(struct device *dev)
2346 {
2347         struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev);
2348
2349         ida_simple_remove(&mmc_rpmb_ida, rpmb->id);
2350         kfree(rpmb);
2351 }
2352
2353 static int mmc_blk_alloc_rpmb_part(struct mmc_card *card,
2354                                    struct mmc_blk_data *md,
2355                                    unsigned int part_index,
2356                                    sector_t size,
2357                                    const char *subname)
2358 {
2359         int devidx, ret;
2360         char rpmb_name[DISK_NAME_LEN];
2361         char cap_str[10];
2362         struct mmc_rpmb_data *rpmb;
2363
2364         /* This creates the minor number for the RPMB char device */
2365         devidx = ida_simple_get(&mmc_rpmb_ida, 0, max_devices, GFP_KERNEL);
2366         if (devidx < 0)
2367                 return devidx;
2368
2369         rpmb = kzalloc(sizeof(*rpmb), GFP_KERNEL);
2370         if (!rpmb) {
2371                 ida_simple_remove(&mmc_rpmb_ida, devidx);
2372                 return -ENOMEM;
2373         }
2374
2375         snprintf(rpmb_name, sizeof(rpmb_name),
2376                  "mmcblk%u%s", card->host->index, subname ? subname : "");
2377
2378         rpmb->id = devidx;
2379         rpmb->part_index = part_index;
2380         rpmb->dev.init_name = rpmb_name;
2381         rpmb->dev.bus = &mmc_rpmb_bus_type;
2382         rpmb->dev.devt = MKDEV(MAJOR(mmc_rpmb_devt), rpmb->id);
2383         rpmb->dev.parent = &card->dev;
2384         rpmb->dev.release = mmc_blk_rpmb_device_release;
2385         device_initialize(&rpmb->dev);
2386         dev_set_drvdata(&rpmb->dev, rpmb);
2387         rpmb->md = md;
2388
2389         cdev_init(&rpmb->chrdev, &mmc_rpmb_fileops);
2390         rpmb->chrdev.owner = THIS_MODULE;
2391         ret = cdev_device_add(&rpmb->chrdev, &rpmb->dev);
2392         if (ret) {
2393                 pr_err("%s: could not add character device\n", rpmb_name);
2394                 goto out_put_device;
2395         }
2396
2397         list_add(&rpmb->node, &md->rpmbs);
2398
2399         string_get_size((u64)size, 512, STRING_UNITS_2,
2400                         cap_str, sizeof(cap_str));
2401
2402         pr_info("%s: %s %s partition %u %s, chardev (%d:%d)\n",
2403                 rpmb_name, mmc_card_id(card),
2404                 mmc_card_name(card), EXT_CSD_PART_CONFIG_ACC_RPMB, cap_str,
2405                 MAJOR(mmc_rpmb_devt), rpmb->id);
2406
2407         return 0;
2408
2409 out_put_device:
2410         put_device(&rpmb->dev);
2411         return ret;
2412 }
2413
2414 static void mmc_blk_remove_rpmb_part(struct mmc_rpmb_data *rpmb)
2415 {
2417         cdev_device_del(&rpmb->chrdev, &rpmb->dev);
2418         put_device(&rpmb->dev);
2419 }
2420
2421 /* MMC physical partitions consist of two boot partitions and
2422  * up to four general purpose partitions.
2423  * For each partition enabled in EXT_CSD a block device will be
2424  * allocated to provide access to the partition.
2425  */
2426
2427 static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
2428 {
2429         int idx, ret;
2430
2431         if (!mmc_card_mmc(card))
2432                 return 0;
2433
2434         for (idx = 0; idx < card->nr_parts; idx++) {
2435                 if (card->part[idx].area_type & MMC_BLK_DATA_AREA_RPMB) {
2436                         /*
2437                          * RPMB partitions do not provide block access; they
2438                          * are only accessed using ioctl()s. Thus, create
2439                          * special RPMB block devices that do not have a
2440                          * backing block queue.
2441                          */
2442                         ret = mmc_blk_alloc_rpmb_part(card, md,
2443                                 card->part[idx].part_cfg,
2444                                 card->part[idx].size >> 9,
2445                                 card->part[idx].name);
2446                         if (ret)
2447                                 return ret;
2448                 } else if (card->part[idx].size) {
2449                         ret = mmc_blk_alloc_part(card, md,
2450                                 card->part[idx].part_cfg,
2451                                 card->part[idx].size >> 9,
2452                                 card->part[idx].force_ro,
2453                                 card->part[idx].name,
2454                                 card->part[idx].area_type);
2455                         if (ret)
2456                                 return ret;
2457                 }
2458         }
2459
2460         return 0;
2461 }
2462
2463 static void mmc_blk_remove_req(struct mmc_blk_data *md)
2464 {
2465         struct mmc_card *card;
2466
2467         if (md) {
2468                 /*
2469                  * Flush remaining requests and free queues. It
2470                  * is freeing the queue that stops new requests
2471                  * from being accepted.
2472                  */
2473                 card = md->queue.card;
2474                 spin_lock_irq(md->queue.queue->queue_lock);
2475                 queue_flag_set(QUEUE_FLAG_BYPASS, md->queue.queue);
2476                 spin_unlock_irq(md->queue.queue->queue_lock);
2477                 blk_set_queue_dying(md->queue.queue);
2478                 mmc_cleanup_queue(&md->queue);
2479                 if (md->disk->flags & GENHD_FL_UP) {
2480                         device_remove_file(disk_to_dev(md->disk), &md->force_ro);
2481                         if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
2482                                         card->ext_csd.boot_ro_lockable)
2483                                 device_remove_file(disk_to_dev(md->disk),
2484                                         &md->power_ro_lock);
2485
2486                         del_gendisk(md->disk);
2487                 }
2488                 mmc_blk_put(md);
2489         }
2490 }
2491
2492 static void mmc_blk_remove_parts(struct mmc_card *card,
2493                                  struct mmc_blk_data *md)
2494 {
2495         struct list_head *pos, *q;
2496         struct mmc_blk_data *part_md;
2497         struct mmc_rpmb_data *rpmb;
2498
2499         /* Remove RPMB partitions */
2500         list_for_each_safe(pos, q, &md->rpmbs) {
2501                 rpmb = list_entry(pos, struct mmc_rpmb_data, node);
2502                 list_del(pos);
2503                 mmc_blk_remove_rpmb_part(rpmb);
2504         }
2505         /* Remove block partitions */
2506         list_for_each_safe(pos, q, &md->part) {
2507                 part_md = list_entry(pos, struct mmc_blk_data, part);
2508                 list_del(pos);
2509                 mmc_blk_remove_req(part_md);
2510         }
2511 }
2512
2513 static int mmc_add_disk(struct mmc_blk_data *md)
2514 {
2515         int ret;
2516         struct mmc_card *card = md->queue.card;
2517
2518         device_add_disk(md->parent, md->disk);
2519         md->force_ro.show = force_ro_show;
2520         md->force_ro.store = force_ro_store;
2521         sysfs_attr_init(&md->force_ro.attr);
2522         md->force_ro.attr.name = "force_ro";
2523         md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
2524         ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
2525         if (ret)
2526                 goto force_ro_fail;
2527
2528         if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
2529              card->ext_csd.boot_ro_lockable) {
2530                 umode_t mode;
2531
2532                 if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
2533                         mode = S_IRUGO;
2534                 else
2535                         mode = S_IRUGO | S_IWUSR;
2536
2537                 md->power_ro_lock.show = power_ro_lock_show;
2538                 md->power_ro_lock.store = power_ro_lock_store;
2539                 sysfs_attr_init(&md->power_ro_lock.attr);
2540                 md->power_ro_lock.attr.mode = mode;
2541                 md->power_ro_lock.attr.name =
2542                                         "ro_lock_until_next_power_on";
2543                 ret = device_create_file(disk_to_dev(md->disk),
2544                                 &md->power_ro_lock);
2545                 if (ret)
2546                         goto power_ro_lock_fail;
2547         }
2548         return ret;
2549
2550 power_ro_lock_fail:
2551         device_remove_file(disk_to_dev(md->disk), &md->force_ro);
2552 force_ro_fail:
2553         del_gendisk(md->disk);
2554
2555         return ret;
2556 }
2557
2558 #ifdef CONFIG_DEBUG_FS
2559
2560 static int mmc_dbg_card_status_get(void *data, u64 *val)
2561 {
2562         struct mmc_card *card = data;
2563         struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
2564         struct mmc_queue *mq = &md->queue;
2565         struct request *req;
2566         int ret;
2567
2568         /* Ask the block layer about the card status */
2569         req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
2570         if (IS_ERR(req))
2571                 return PTR_ERR(req);
2572         req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
2573         blk_execute_rq(mq->queue, NULL, req, 0);
2574         ret = req_to_mmc_queue_req(req)->drv_op_result;
2575         if (ret >= 0) {
2576                 *val = ret;
2577                 ret = 0;
2578         }
2579         blk_put_request(req);
2580
2581         return ret;
2582 }
2583 DEFINE_SIMPLE_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get,
2584                 NULL, "%08llx\n");
2585
2586 /* Two hex digits per byte * 512 bytes + 1 for the newline */
2587 #define EXT_CSD_STR_LEN 1025
2588
2589 static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
2590 {
2591         struct mmc_card *card = inode->i_private;
2592         struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
2593         struct mmc_queue *mq = &md->queue;
2594         struct request *req;
2595         char *buf;
2596         ssize_t n = 0;
2597         u8 *ext_csd;
2598         int err, i;
2599
2600         buf = kmalloc(EXT_CSD_STR_LEN + 1, GFP_KERNEL);
2601         if (!buf)
2602                 return -ENOMEM;
2603
2604         /* Ask the block layer for the EXT CSD */
2605         req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
2606         if (IS_ERR(req)) {
2607                 err = PTR_ERR(req);
2608                 goto out_free;
2609         }
2610         req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
2611         req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
2612         blk_execute_rq(mq->queue, NULL, req, 0);
2613         err = req_to_mmc_queue_req(req)->drv_op_result;
2614         blk_put_request(req);
2615         if (err) {
2616                 pr_err("%s: failed to get EXT_CSD, err %d\n", __func__, err);
2617                 goto out_free;
2618         }
2619
2620         for (i = 0; i < 512; i++)
2621                 n += sprintf(buf + n, "%02x", ext_csd[i]);
2622         n += sprintf(buf + n, "\n");
2623
2624         if (n != EXT_CSD_STR_LEN) {
2625                 err = -EINVAL;
2626                 goto out_free;
2627         }
2628
2629         filp->private_data = buf;
2630         kfree(ext_csd);
2631         return 0;
2632
2633 out_free:
2634         kfree(buf);
2635         return err;
2636 }
2637
2638 static ssize_t mmc_ext_csd_read(struct file *filp, char __user *ubuf,
2639                                 size_t cnt, loff_t *ppos)
2640 {
2641         char *buf = filp->private_data;
2642
2643         return simple_read_from_buffer(ubuf, cnt, ppos,
2644                                        buf, EXT_CSD_STR_LEN);
2645 }
2646
2647 static int mmc_ext_csd_release(struct inode *inode, struct file *file)
2648 {
2649         kfree(file->private_data);
2650         return 0;
2651 }
2652
2653 static const struct file_operations mmc_dbg_ext_csd_fops = {
2654         .open           = mmc_ext_csd_open,
2655         .read           = mmc_ext_csd_read,
2656         .release        = mmc_ext_csd_release,
2657         .llseek         = default_llseek,
2658 };
2659
2660 static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
2661 {
2662         struct dentry *root;
2663
2664         if (!card->debugfs_root)
2665                 return 0;
2666
2667         root = card->debugfs_root;
2668
2669         if (mmc_card_mmc(card) || mmc_card_sd(card)) {
2670                 md->status_dentry =
2671                         debugfs_create_file("status", S_IRUSR, root, card,
2672                                             &mmc_dbg_card_status_fops);
2673                 if (!md->status_dentry)
2674                         return -EIO;
2675         }
2676
2677         if (mmc_card_mmc(card)) {
2678                 md->ext_csd_dentry =
2679                         debugfs_create_file("ext_csd", S_IRUSR, root, card,
2680                                             &mmc_dbg_ext_csd_fops);
2681                 if (!md->ext_csd_dentry)
2682                         return -EIO;
2683         }
2684
2685         return 0;
2686 }
2687
2688 static void mmc_blk_remove_debugfs(struct mmc_card *card,
2689                                    struct mmc_blk_data *md)
2690 {
2691         if (!card->debugfs_root)
2692                 return;
2693
2694         if (!IS_ERR_OR_NULL(md->status_dentry)) {
2695                 debugfs_remove(md->status_dentry);
2696                 md->status_dentry = NULL;
2697         }
2698
2699         if (!IS_ERR_OR_NULL(md->ext_csd_dentry)) {
2700                 debugfs_remove(md->ext_csd_dentry);
2701                 md->ext_csd_dentry = NULL;
2702         }
2703 }
2704
2705 #else
2706
2707 static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
2708 {
2709         return 0;
2710 }
2711
2712 static void mmc_blk_remove_debugfs(struct mmc_card *card,
2713                                    struct mmc_blk_data *md)
2714 {
2715 }
2716
2717 #endif /* CONFIG_DEBUG_FS */
2718
2719 static int mmc_blk_probe(struct mmc_card *card)
2720 {
2721         struct mmc_blk_data *md, *part_md;
2722         char cap_str[10];
2723
2724         /*
2725          * Check that the card supports the command class(es) we need.
2726          */
2727         if (!(card->csd.cmdclass & CCC_BLOCK_READ))
2728                 return -ENODEV;
2729
2730         mmc_fixup_device(card, mmc_blk_fixups);
2731
2732         md = mmc_blk_alloc(card);
2733         if (IS_ERR(md))
2734                 return PTR_ERR(md);
2735
2736         string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
2737                         cap_str, sizeof(cap_str));
2738         pr_info("%s: %s %s %s %s\n",
2739                 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
2740                 cap_str, md->read_only ? "(ro)" : "");
2741
2742         if (mmc_blk_alloc_parts(card, md))
2743                 goto out;
2744
2745         dev_set_drvdata(&card->dev, md);
2746
2747         if (mmc_add_disk(md))
2748                 goto out;
2749
2750         list_for_each_entry(part_md, &md->part, part) {
2751                 if (mmc_add_disk(part_md))
2752                         goto out;
2753         }
2754
2755         /* Add two debugfs entries */
2756         mmc_blk_add_debugfs(card, md);
2757
2758         pm_runtime_set_autosuspend_delay(&card->dev, 3000);
2759         pm_runtime_use_autosuspend(&card->dev);
2760
2761         /*
2762          * Don't enable runtime PM for SD-combo cards here. Leave that
2763          * decision to be taken during the SDIO init sequence instead.
2764          */
2765         if (card->type != MMC_TYPE_SD_COMBO) {
2766                 pm_runtime_set_active(&card->dev);
2767                 pm_runtime_enable(&card->dev);
2768         }
2769
2770         return 0;
2771
2772  out:
2773         mmc_blk_remove_parts(card, md);
2774         mmc_blk_remove_req(md);
2775         return 0;
2776 }
2777
2778 static void mmc_blk_remove(struct mmc_card *card)
2779 {
2780         struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
2781
2782         mmc_blk_remove_debugfs(card, md);
2783         mmc_blk_remove_parts(card, md);
2784         pm_runtime_get_sync(&card->dev);
2785         mmc_claim_host(card->host);
2786         mmc_blk_part_switch(card, md->part_type);
2787         mmc_release_host(card->host);
2788         if (card->type != MMC_TYPE_SD_COMBO)
2789                 pm_runtime_disable(&card->dev);
2790         pm_runtime_put_noidle(&card->dev);
2791         mmc_blk_remove_req(md);
2792         dev_set_drvdata(&card->dev, NULL);
2793 }
2794
2795 static int _mmc_blk_suspend(struct mmc_card *card)
2796 {
2797         struct mmc_blk_data *part_md;
2798         struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
2799
2800         if (md) {
2801                 mmc_queue_suspend(&md->queue);
2802                 list_for_each_entry(part_md, &md->part, part) {
2803                         mmc_queue_suspend(&part_md->queue);
2804                 }
2805         }
2806         return 0;
2807 }
2808
2809 static void mmc_blk_shutdown(struct mmc_card *card)
2810 {
2811         _mmc_blk_suspend(card);
2812 }
2813
2814 #ifdef CONFIG_PM_SLEEP
2815 static int mmc_blk_suspend(struct device *dev)
2816 {
2817         struct mmc_card *card = mmc_dev_to_card(dev);
2818
2819         return _mmc_blk_suspend(card);
2820 }
2821
2822 static int mmc_blk_resume(struct device *dev)
2823 {
2824         struct mmc_blk_data *part_md;
2825         struct mmc_blk_data *md = dev_get_drvdata(dev);
2826
2827         if (md) {
2828                 /*
2829                  * Resume involves the card going into idle state,
2830                  * so current partition is always the main one.
2831                  */
2832                 md->part_curr = md->part_type;
2833                 mmc_queue_resume(&md->queue);
2834                 list_for_each_entry(part_md, &md->part, part) {
2835                         mmc_queue_resume(&part_md->queue);
2836                 }
2837         }
2838         return 0;
2839 }
2840 #endif
2841
2842 static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume);
2843
2844 static struct mmc_driver mmc_driver = {
2845         .drv            = {
2846                 .name   = "mmcblk",
2847                 .pm     = &mmc_blk_pm_ops,
2848         },
2849         .probe          = mmc_blk_probe,
2850         .remove         = mmc_blk_remove,
2851         .shutdown       = mmc_blk_shutdown,
2852 };
2853
2854 static int __init mmc_blk_init(void)
2855 {
2856         int res;
2857
2858         res  = bus_register(&mmc_rpmb_bus_type);
2859         if (res < 0) {
2860                 pr_err("mmcblk: could not register RPMB bus type\n");
2861                 return res;
2862         }
2863         res = alloc_chrdev_region(&mmc_rpmb_devt, 0, MAX_DEVICES, "rpmb");
2864         if (res < 0) {
2865                 pr_err("mmcblk: failed to allocate rpmb chrdev region\n");
2866                 goto out_bus_unreg;
2867         }
2868
2869         if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
2870                 pr_info("mmcblk: using %d minors per device\n", perdev_minors);
2871
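        /*
         * Example: with 8 minors per device (the Kconfig default) this
         * is min(256, (1 << 20) / 8), so MAX_DEVICES (256) is the
         * effective cap.
         */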
2872         max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors);
2873
2874         res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
2875         if (res)
2876                 goto out_chrdev_unreg;
2877
2878         res = mmc_register_driver(&mmc_driver);
2879         if (res)
2880                 goto out_blkdev_unreg;
2881
2882         return 0;
2883
2884 out_blkdev_unreg:
2885         unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
2886 out_chrdev_unreg:
2887         unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES);
2888 out_bus_unreg:
2889         bus_unregister(&mmc_rpmb_bus_type);
2890         return res;
2891 }
2892
2893 static void __exit mmc_blk_exit(void)
2894 {
2895         mmc_unregister_driver(&mmc_driver);
2896         unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
2897         unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES);
2898 }
2899
2900 module_init(mmc_blk_init);
2901 module_exit(mmc_blk_exit);
2902
2903 MODULE_LICENSE("GPL");
2904 MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
2905