drivers/mmc/core/block.c
1da177e4
LT
1/*
2 * Block driver for media (i.e., flash cards)
3 *
4 * Copyright 2002 Hewlett-Packard Company
979ce720 5 * Copyright 2005-2008 Pierre Ossman
1da177e4
LT
6 *
7 * Use consistent with the GNU GPL is permitted,
8 * provided that this copyright notice is
9 * preserved in its entirety in all copies and derived works.
10 *
11 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
12 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
13 * FITNESS FOR ANY PARTICULAR PURPOSE.
14 *
15 * Many thanks to Alessandro Rubini and Jonathan Corbet!
16 *
17 * Author: Andrew Christian
18 * 28 May 2002
19 */
20#include <linux/moduleparam.h>
21#include <linux/module.h>
22#include <linux/init.h>
23
1da177e4
LT
24#include <linux/kernel.h>
25#include <linux/fs.h>
5a0e3ad6 26#include <linux/slab.h>
1da177e4
LT
27#include <linux/errno.h>
28#include <linux/hdreg.h>
29#include <linux/kdev_t.h>
30#include <linux/blkdev.h>
a621aaed 31#include <linux/mutex.h>
ec5a19dd 32#include <linux/scatterlist.h>
a7bbb573 33#include <linux/string_helpers.h>
cb87ea28
JC
34#include <linux/delay.h>
35#include <linux/capability.h>
36#include <linux/compat.h>
e94cfef6 37#include <linux/pm_runtime.h>
b10fa99e 38#include <linux/idr.h>
1da177e4 39
cb87ea28 40#include <linux/mmc/ioctl.h>
1da177e4 41#include <linux/mmc/card.h>
385e3227 42#include <linux/mmc/host.h>
da7fbe58
PO
43#include <linux/mmc/mmc.h>
44#include <linux/mmc/sd.h>
1da177e4 45
7c0f6ba6 46#include <linux/uaccess.h>
1da177e4 47
98ac2162 48#include "queue.h"
48ab086d 49#include "block.h"
55244c56 50#include "core.h"
4facdde1 51#include "card.h"
5857b29b 52#include "host.h"
4facdde1 53#include "bus.h"
55244c56 54#include "mmc_ops.h"
28fc64af 55#include "quirks.h"
55244c56 56#include "sd_ops.h"
1da177e4 57
6b0b6285 58MODULE_ALIAS("mmc:block");
5e71b7a6
OJ
59#ifdef MODULE_PARAM_PREFIX
60#undef MODULE_PARAM_PREFIX
61#endif
62#define MODULE_PARAM_PREFIX "mmcblk."
63
8fee476b 64#define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
775a9362
ME
65#define MMC_SANITIZE_REQ_TIMEOUT 240000
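/*
 * MMC_SWITCH encodes the EXT_CSD byte index in bits [23:16] of the command
 * argument; e.g. arg 0x03A50100 yields index 0xA5 (EXT_CSD_SANITIZE_START),
 * which is how the sanitize request is recognised in the ioctl path below.
 */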
66#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
6a7a6b45 67
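/* True for REQ_FUA writes, i.e. the candidates for MMC reliable writes. */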
d3df0465 68#define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \
ce39f9d1 69 (rq_data_dir(req) == WRITE))
5e71b7a6 70static DEFINE_MUTEX(block_mutex);
6b0b6285 71
1da177e4 72/*
5e71b7a6
OJ
 73 * The defaults come from config options but can be overridden by module
74 * or bootarg options.
1da177e4 75 */
5e71b7a6 76static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
1dff3144 77
5e71b7a6
OJ
78/*
79 * We've only got one major, so number of mmcblk devices is
a26eba61 80 * limited to (1 << 20) / number of minors per device. It is also
b10fa99e 81 * limited by the MAX_DEVICES below.
5e71b7a6
OJ
82 */
83static int max_devices;
84
a26eba61
BH
85#define MAX_DEVICES 256
86
b10fa99e 87static DEFINE_IDA(mmc_blk_ida);
1da177e4 88
1da177e4
LT
89/*
90 * There is one mmc_blk_data per slot.
91 */
92struct mmc_blk_data {
93 spinlock_t lock;
307d8e6f 94 struct device *parent;
1da177e4
LT
95 struct gendisk *disk;
96 struct mmc_queue queue;
371a689f 97 struct list_head part;
1da177e4 98
d0c97cfb
AW
99 unsigned int flags;
100#define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */
101#define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */
102
1da177e4 103 unsigned int usage;
a6f6c96b 104 unsigned int read_only;
371a689f 105 unsigned int part_type;
67716327
AH
106 unsigned int reset_done;
107#define MMC_BLK_READ BIT(0)
108#define MMC_BLK_WRITE BIT(1)
109#define MMC_BLK_DISCARD BIT(2)
110#define MMC_BLK_SECDISCARD BIT(3)
371a689f
AW
111
112 /*
113 * Only set in main mmc_blk_data associated
fc95e30b 114 * with mmc_card with dev_set_drvdata, and keeps
371a689f
AW
115 * track of the current selected device partition.
116 */
117 unsigned int part_curr;
118 struct device_attribute force_ro;
add710ea
JR
119 struct device_attribute power_ro_lock;
120 int area_type;
1da177e4
LT
121};
122
a621aaed 123static DEFINE_MUTEX(open_lock);
1da177e4 124
5e71b7a6
OJ
125module_param(perdev_minors, int, 0444);
 126MODULE_PARM_DESC(perdev_minors, "Minor numbers to allocate per device");
127
8d1e977d
LP
128static inline int mmc_blk_part_switch(struct mmc_card *card,
129 struct mmc_blk_data *md);
130static int get_card_status(struct mmc_card *card, u32 *status, int retries);
131
1da177e4
LT
132static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
133{
134 struct mmc_blk_data *md;
135
a621aaed 136 mutex_lock(&open_lock);
1da177e4
LT
137 md = disk->private_data;
138 if (md && md->usage == 0)
139 md = NULL;
140 if (md)
141 md->usage++;
a621aaed 142 mutex_unlock(&open_lock);
1da177e4
LT
143
144 return md;
145}
146
371a689f
AW
147static inline int mmc_get_devidx(struct gendisk *disk)
148{
382c55f8 149 int devidx = disk->first_minor / perdev_minors;
371a689f
AW
150 return devidx;
151}
152
1da177e4
LT
153static void mmc_blk_put(struct mmc_blk_data *md)
154{
a621aaed 155 mutex_lock(&open_lock);
1da177e4
LT
156 md->usage--;
157 if (md->usage == 0) {
371a689f 158 int devidx = mmc_get_devidx(md->disk);
5fa83ce2 159 blk_cleanup_queue(md->queue.queue);
a04848c7 160 ida_simple_remove(&mmc_blk_ida, devidx);
1da177e4 161 put_disk(md->disk);
1da177e4
LT
162 kfree(md);
163 }
a621aaed 164 mutex_unlock(&open_lock);
1da177e4
LT
165}
166
add710ea
JR
167static ssize_t power_ro_lock_show(struct device *dev,
168 struct device_attribute *attr, char *buf)
169{
170 int ret;
171 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
172 struct mmc_card *card = md->queue.card;
173 int locked = 0;
174
175 if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
176 locked = 2;
177 else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
178 locked = 1;
179
180 ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
181
9098f84c
TW
182 mmc_blk_put(md);
183
add710ea
JR
184 return ret;
185}
186
187static ssize_t power_ro_lock_store(struct device *dev,
188 struct device_attribute *attr, const char *buf, size_t count)
189{
190 int ret;
191 struct mmc_blk_data *md, *part_md;
192 struct mmc_card *card;
193 unsigned long set;
194
195 if (kstrtoul(buf, 0, &set))
196 return -EINVAL;
197
198 if (set != 1)
199 return count;
200
201 md = mmc_blk_get(dev_to_disk(dev));
202 card = md->queue.card;
203
e94cfef6 204 mmc_get_card(card);
add710ea
JR
205
206 ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
207 card->ext_csd.boot_ro_lock |
208 EXT_CSD_BOOT_WP_B_PWR_WP_EN,
209 card->ext_csd.part_time);
210 if (ret)
211 pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
212 else
213 card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;
214
e94cfef6 215 mmc_put_card(card);
add710ea
JR
216
217 if (!ret) {
218 pr_info("%s: Locking boot partition ro until next power on\n",
219 md->disk->disk_name);
220 set_disk_ro(md->disk, 1);
221
222 list_for_each_entry(part_md, &md->part, part)
223 if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
224 pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
225 set_disk_ro(part_md->disk, 1);
226 }
227 }
228
229 mmc_blk_put(md);
230 return count;
231}
232
371a689f
AW
233static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
234 char *buf)
235{
236 int ret;
237 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
238
0031a98a 239 ret = snprintf(buf, PAGE_SIZE, "%d\n",
371a689f
AW
240 get_disk_ro(dev_to_disk(dev)) ^
241 md->read_only);
242 mmc_blk_put(md);
243 return ret;
244}
245
246static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
247 const char *buf, size_t count)
248{
249 int ret;
250 char *end;
251 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
252 unsigned long set = simple_strtoul(buf, &end, 0);
253 if (end == buf) {
254 ret = -EINVAL;
255 goto out;
256 }
257
258 set_disk_ro(dev_to_disk(dev), set || md->read_only);
259 ret = count;
260out:
261 mmc_blk_put(md);
262 return ret;
263}
264
a5a1561f 265static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
1da177e4 266{
a5a1561f 267 struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
1da177e4
LT
268 int ret = -ENXIO;
269
2a48fc0a 270 mutex_lock(&block_mutex);
1da177e4
LT
271 if (md) {
272 if (md->usage == 2)
a5a1561f 273 check_disk_change(bdev);
1da177e4 274 ret = 0;
a00fc090 275
a5a1561f 276 if ((mode & FMODE_WRITE) && md->read_only) {
70bb0896 277 mmc_blk_put(md);
a00fc090 278 ret = -EROFS;
70bb0896 279 }
1da177e4 280 }
2a48fc0a 281 mutex_unlock(&block_mutex);
1da177e4
LT
282
283 return ret;
284}
285
db2a144b 286static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
1da177e4 287{
a5a1561f 288 struct mmc_blk_data *md = disk->private_data;
1da177e4 289
2a48fc0a 290 mutex_lock(&block_mutex);
1da177e4 291 mmc_blk_put(md);
2a48fc0a 292 mutex_unlock(&block_mutex);
1da177e4
LT
293}
294
295static int
a885c8c4 296mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1da177e4 297{
a885c8c4
CH
298 geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
299 geo->heads = 4;
300 geo->sectors = 16;
301 return 0;
1da177e4
LT
302}
303
cb87ea28
JC
304struct mmc_blk_ioc_data {
305 struct mmc_ioc_cmd ic;
306 unsigned char *buf;
307 u64 buf_bytes;
308};
309
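/*
 * Copy an mmc_ioc_cmd and, when blksz * blocks is non-zero, its data buffer
 * from user space into a freshly allocated mmc_blk_ioc_data. Returns an
 * ERR_PTR() on failure.
 */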
310static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
311 struct mmc_ioc_cmd __user *user)
312{
313 struct mmc_blk_ioc_data *idata;
314 int err;
315
1ff8950c 316 idata = kmalloc(sizeof(*idata), GFP_KERNEL);
cb87ea28
JC
317 if (!idata) {
318 err = -ENOMEM;
aea253ec 319 goto out;
cb87ea28
JC
320 }
321
322 if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
323 err = -EFAULT;
aea253ec 324 goto idata_err;
cb87ea28
JC
325 }
326
327 idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
328 if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
329 err = -EOVERFLOW;
aea253ec 330 goto idata_err;
cb87ea28
JC
331 }
332
bfe5b1b1
VV
333 if (!idata->buf_bytes) {
334 idata->buf = NULL;
4d6144de 335 return idata;
bfe5b1b1 336 }
4d6144de 337
1ff8950c 338 idata->buf = kmalloc(idata->buf_bytes, GFP_KERNEL);
cb87ea28
JC
339 if (!idata->buf) {
340 err = -ENOMEM;
aea253ec 341 goto idata_err;
cb87ea28
JC
342 }
343
344 if (copy_from_user(idata->buf, (void __user *)(unsigned long)
345 idata->ic.data_ptr, idata->buf_bytes)) {
346 err = -EFAULT;
347 goto copy_err;
348 }
349
350 return idata;
351
352copy_err:
353 kfree(idata->buf);
aea253ec 354idata_err:
cb87ea28 355 kfree(idata);
aea253ec 356out:
cb87ea28 357 return ERR_PTR(err);
cb87ea28
JC
358}
359
a5f5774c
JH
360static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
361 struct mmc_blk_ioc_data *idata)
362{
363 struct mmc_ioc_cmd *ic = &idata->ic;
364
365 if (copy_to_user(&(ic_ptr->response), ic->response,
366 sizeof(ic->response)))
367 return -EFAULT;
368
369 if (!idata->ic.write_flag) {
370 if (copy_to_user((void __user *)(unsigned long)ic->data_ptr,
371 idata->buf, idata->buf_bytes))
372 return -EFAULT;
373 }
374
375 return 0;
376}
377
8d1e977d
LP
378static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
379 u32 retries_max)
380{
381 int err;
382 u32 retry_count = 0;
383
384 if (!status || !retries_max)
385 return -EINVAL;
386
387 do {
388 err = get_card_status(card, status, 5);
389 if (err)
390 break;
391
392 if (!R1_STATUS(*status) &&
393 (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
394 break; /* RPMB programming operation complete */
395
396 /*
 397 * Reschedule to give the MMC device a chance to continue
398 * processing the previous command without being polled too
399 * frequently.
400 */
401 usleep_range(1000, 5000);
402 } while (++retry_count < retries_max);
403
404 if (retry_count == retries_max)
405 err = -EPERM;
406
407 return err;
408}
409
775a9362
ME
410static int ioctl_do_sanitize(struct mmc_card *card)
411{
412 int err;
413
a2d1086d 414 if (!mmc_can_sanitize(card)) {
775a9362
ME
415 pr_warn("%s: %s - SANITIZE is not supported\n",
416 mmc_hostname(card->host), __func__);
417 err = -EOPNOTSUPP;
418 goto out;
419 }
420
421 pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
422 mmc_hostname(card->host), __func__);
423
424 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
425 EXT_CSD_SANITIZE_START, 1,
426 MMC_SANITIZE_REQ_TIMEOUT);
427
428 if (err)
429 pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
430 mmc_hostname(card->host), __func__, err);
431
432 pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
433 __func__);
434out:
435 return err;
436}
437
a5f5774c
JH
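/*
 * Execute a single user-supplied command on the card: switch to the right
 * partition, optionally issue APP_CMD and SET_BLOCK_COUNT first, run the
 * request and, for RPMB, poll CMD13 until the card leaves the programming
 * state.
 */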
438static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
439 struct mmc_blk_ioc_data *idata)
cb87ea28 440{
c7836d15
MY
441 struct mmc_command cmd = {};
442 struct mmc_data data = {};
443 struct mmc_request mrq = {};
cb87ea28
JC
444 struct scatterlist sg;
445 int err;
8d1e977d
LP
446 int is_rpmb = false;
447 u32 status = 0;
cb87ea28 448
a5f5774c
JH
449 if (!card || !md || !idata)
450 return -EINVAL;
cb87ea28 451
8d1e977d
LP
452 if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
453 is_rpmb = true;
454
4d6144de
JR
455 cmd.opcode = idata->ic.opcode;
456 cmd.arg = idata->ic.arg;
457 cmd.flags = idata->ic.flags;
458
459 if (idata->buf_bytes) {
460 data.sg = &sg;
461 data.sg_len = 1;
462 data.blksz = idata->ic.blksz;
463 data.blocks = idata->ic.blocks;
464
465 sg_init_one(data.sg, idata->buf, idata->buf_bytes);
466
467 if (idata->ic.write_flag)
468 data.flags = MMC_DATA_WRITE;
469 else
470 data.flags = MMC_DATA_READ;
471
472 /* data.flags must already be set before doing this. */
473 mmc_set_data_timeout(&data, card);
474
475 /* Allow overriding the timeout_ns for empirical tuning. */
476 if (idata->ic.data_timeout_ns)
477 data.timeout_ns = idata->ic.data_timeout_ns;
478
479 if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
480 /*
481 * Pretend this is a data transfer and rely on the
482 * host driver to compute timeout. When all host
483 * drivers support cmd.cmd_timeout for R1B, this
484 * can be changed to:
485 *
486 * mrq.data = NULL;
487 * cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
488 */
489 data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
490 }
491
492 mrq.data = &data;
493 }
494
495 mrq.cmd = &cmd;
496
8d1e977d
LP
497 err = mmc_blk_part_switch(card, md);
498 if (err)
a5f5774c 499 return err;
8d1e977d 500
cb87ea28
JC
501 if (idata->ic.is_acmd) {
502 err = mmc_app_cmd(card->host, card);
503 if (err)
a5f5774c 504 return err;
cb87ea28
JC
505 }
506
8d1e977d
LP
507 if (is_rpmb) {
508 err = mmc_set_blockcount(card, data.blocks,
509 idata->ic.write_flag & (1 << 31));
510 if (err)
a5f5774c 511 return err;
8d1e977d
LP
512 }
513
a82e484e
YG
514 if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
515 (cmd.opcode == MMC_SWITCH)) {
775a9362
ME
516 err = ioctl_do_sanitize(card);
517
518 if (err)
519 pr_err("%s: ioctl_do_sanitize() failed. err = %d",
520 __func__, err);
521
a5f5774c 522 return err;
775a9362
ME
523 }
524
cb87ea28
JC
525 mmc_wait_for_req(card->host, &mrq);
526
527 if (cmd.error) {
528 dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
529 __func__, cmd.error);
a5f5774c 530 return cmd.error;
cb87ea28
JC
531 }
532 if (data.error) {
533 dev_err(mmc_dev(card->host), "%s: data error %d\n",
534 __func__, data.error);
a5f5774c 535 return data.error;
cb87ea28
JC
536 }
537
538 /*
539 * According to the SD specs, some commands require a delay after
540 * issuing the command.
541 */
542 if (idata->ic.postsleep_min_us)
543 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
544
a5f5774c 545 memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
cb87ea28 546
8d1e977d
LP
547 if (is_rpmb) {
548 /*
549 * Ensure RPMB command has completed by polling CMD13
550 * "Send Status".
551 */
552 err = ioctl_rpmb_card_status_poll(card, &status, 5);
553 if (err)
554 dev_err(mmc_dev(card->host),
555 "%s: Card Status=0x%08X, error %d\n",
556 __func__, status, err);
557 }
558
a5f5774c
JH
559 return err;
560}
561
562static int mmc_blk_ioctl_cmd(struct block_device *bdev,
563 struct mmc_ioc_cmd __user *ic_ptr)
564{
565 struct mmc_blk_ioc_data *idata;
566 struct mmc_blk_data *md;
567 struct mmc_card *card;
b093410c 568 int err = 0, ioc_err = 0;
a5f5774c 569
83c742c3
SL
570 /*
571 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
572 * whole block device, not on a partition. This prevents overspray
573 * between sibling partitions.
574 */
575 if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
576 return -EPERM;
577
a5f5774c
JH
578 idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
579 if (IS_ERR(idata))
580 return PTR_ERR(idata);
581
582 md = mmc_blk_get(bdev->bd_disk);
583 if (!md) {
584 err = -EINVAL;
585 goto cmd_err;
586 }
587
588 card = md->queue.card;
589 if (IS_ERR(card)) {
590 err = PTR_ERR(card);
591 goto cmd_done;
592 }
593
594 mmc_get_card(card);
595
b093410c 596 ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);
a5f5774c 597
3c866568
AH
598 /* Always switch back to main area after RPMB access */
599 if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
600 mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
601
e94cfef6 602 mmc_put_card(card);
cb87ea28 603
b093410c 604 err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
a5f5774c 605
cb87ea28
JC
606cmd_done:
607 mmc_blk_put(md);
1c02f000 608cmd_err:
cb87ea28
JC
609 kfree(idata->buf);
610 kfree(idata);
b093410c 611 return ioc_err ? ioc_err : err;
cb87ea28
JC
612}
613
a5f5774c
JH
614static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
615 struct mmc_ioc_multi_cmd __user *user)
616{
617 struct mmc_blk_ioc_data **idata = NULL;
618 struct mmc_ioc_cmd __user *cmds = user->cmds;
619 struct mmc_card *card;
620 struct mmc_blk_data *md;
b093410c 621 int i, err = 0, ioc_err = 0;
a5f5774c
JH
622 __u64 num_of_cmds;
623
83c742c3
SL
624 /*
625 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
626 * whole block device, not on a partition. This prevents overspray
627 * between sibling partitions.
628 */
629 if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
630 return -EPERM;
631
a5f5774c
JH
632 if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
633 sizeof(num_of_cmds)))
634 return -EFAULT;
635
636 if (num_of_cmds > MMC_IOC_MAX_CMDS)
637 return -EINVAL;
638
639 idata = kcalloc(num_of_cmds, sizeof(*idata), GFP_KERNEL);
640 if (!idata)
641 return -ENOMEM;
642
643 for (i = 0; i < num_of_cmds; i++) {
644 idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]);
645 if (IS_ERR(idata[i])) {
646 err = PTR_ERR(idata[i]);
647 num_of_cmds = i;
648 goto cmd_err;
649 }
650 }
651
652 md = mmc_blk_get(bdev->bd_disk);
f00ab14c
OJ
653 if (!md) {
654 err = -EINVAL;
a5f5774c 655 goto cmd_err;
f00ab14c 656 }
a5f5774c
JH
657
658 card = md->queue.card;
659 if (IS_ERR(card)) {
660 err = PTR_ERR(card);
661 goto cmd_done;
662 }
663
664 mmc_get_card(card);
665
b093410c
GG
666 for (i = 0; i < num_of_cmds && !ioc_err; i++)
667 ioc_err = __mmc_blk_ioctl_cmd(card, md, idata[i]);
a5f5774c 668
3c866568
AH
669 /* Always switch back to main area after RPMB access */
670 if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
671 mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
672
a5f5774c
JH
673 mmc_put_card(card);
674
675 /* copy to user if data and response */
b093410c 676 for (i = 0; i < num_of_cmds && !err; i++)
a5f5774c 677 err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);
a5f5774c
JH
678
679cmd_done:
680 mmc_blk_put(md);
681cmd_err:
682 for (i = 0; i < num_of_cmds; i++) {
683 kfree(idata[i]->buf);
684 kfree(idata[i]);
685 }
686 kfree(idata);
b093410c 687 return ioc_err ? ioc_err : err;
a5f5774c
JH
688}
689
cb87ea28
JC
690static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
691 unsigned int cmd, unsigned long arg)
692{
a5f5774c
JH
693 switch (cmd) {
694 case MMC_IOC_CMD:
695 return mmc_blk_ioctl_cmd(bdev,
696 (struct mmc_ioc_cmd __user *)arg);
697 case MMC_IOC_MULTI_CMD:
698 return mmc_blk_ioctl_multi_cmd(bdev,
699 (struct mmc_ioc_multi_cmd __user *)arg);
700 default:
701 return -EINVAL;
702 }
cb87ea28
JC
703}
704
705#ifdef CONFIG_COMPAT
706static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
707 unsigned int cmd, unsigned long arg)
708{
709 return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
710}
711#endif
712
83d5cde4 713static const struct block_device_operations mmc_bdops = {
a5a1561f
AV
714 .open = mmc_blk_open,
715 .release = mmc_blk_release,
a885c8c4 716 .getgeo = mmc_blk_getgeo,
1da177e4 717 .owner = THIS_MODULE,
cb87ea28
JC
718 .ioctl = mmc_blk_ioctl,
719#ifdef CONFIG_COMPAT
720 .compat_ioctl = mmc_blk_compat_ioctl,
721#endif
1da177e4
LT
722};
723
371a689f
AW
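/*
 * Select the partition (user area, boot or RPMB) that @md represents by
 * rewriting PART_CONFIG in the EXT_CSD; a no-op if it is already selected.
 * Re-tuning is paused while the RPMB partition is selected.
 */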
724static inline int mmc_blk_part_switch(struct mmc_card *card,
725 struct mmc_blk_data *md)
726{
727 int ret;
fc95e30b 728 struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
0d7d85ca 729
371a689f
AW
730 if (main_md->part_curr == md->part_type)
731 return 0;
732
733 if (mmc_card_mmc(card)) {
0d7d85ca
AH
734 u8 part_config = card->ext_csd.part_config;
735
57da0c04
AH
736 if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
737 mmc_retune_pause(card->host);
738
0d7d85ca
AH
739 part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
740 part_config |= md->part_type;
371a689f
AW
741
742 ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
0d7d85ca 743 EXT_CSD_PART_CONFIG, part_config,
371a689f 744 card->ext_csd.part_time);
57da0c04
AH
745 if (ret) {
746 if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
747 mmc_retune_unpause(card->host);
371a689f 748 return ret;
57da0c04 749 }
0d7d85ca
AH
750
751 card->ext_csd.part_config = part_config;
57da0c04
AH
752
753 if (main_md->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB)
754 mmc_retune_unpause(card->host);
67716327 755 }
371a689f
AW
756
757 main_md->part_curr = md->part_type;
758 return 0;
759}
760
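/*
 * Issue ACMD22 (SEND_NUM_WR_BLKS) to ask an SD card how many blocks of the
 * previous write were actually committed, so the known-good part of a
 * failed write can still be completed.
 */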
169f03a0 761static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
ec5a19dd
PO
762{
763 int err;
051913da
BD
764 u32 result;
765 __be32 *blocks;
ec5a19dd 766
c7836d15
MY
767 struct mmc_request mrq = {};
768 struct mmc_command cmd = {};
769 struct mmc_data data = {};
ec5a19dd
PO
770
771 struct scatterlist sg;
772
ec5a19dd
PO
773 cmd.opcode = MMC_APP_CMD;
774 cmd.arg = card->rca << 16;
7213d175 775 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
ec5a19dd
PO
776
777 err = mmc_wait_for_cmd(card->host, &cmd, 0);
7213d175 778 if (err)
169f03a0 779 return err;
7213d175 780 if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
169f03a0 781 return -EIO;
ec5a19dd
PO
782
783 memset(&cmd, 0, sizeof(struct mmc_command));
784
785 cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
786 cmd.arg = 0;
7213d175 787 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
ec5a19dd 788
ec5a19dd
PO
789 data.blksz = 4;
790 data.blocks = 1;
791 data.flags = MMC_DATA_READ;
792 data.sg = &sg;
793 data.sg_len = 1;
d380443c 794 mmc_set_data_timeout(&data, card);
ec5a19dd 795
ec5a19dd
PO
796 mrq.cmd = &cmd;
797 mrq.data = &data;
798
051913da
BD
799 blocks = kmalloc(4, GFP_KERNEL);
800 if (!blocks)
169f03a0 801 return -ENOMEM;
051913da
BD
802
803 sg_init_one(&sg, blocks, 4);
ec5a19dd
PO
804
805 mmc_wait_for_req(card->host, &mrq);
806
051913da
BD
807 result = ntohl(*blocks);
808 kfree(blocks);
809
17b0429d 810 if (cmd.error || data.error)
169f03a0
LW
811 return -EIO;
812
813 *written_blocks = result;
ec5a19dd 814
169f03a0 815 return 0;
ec5a19dd
PO
816}
817
0a2d4048 818static int get_card_status(struct mmc_card *card, u32 *status, int retries)
504f191f 819{
c7836d15 820 struct mmc_command cmd = {};
504f191f
AH
821 int err;
822
504f191f
AH
823 cmd.opcode = MMC_SEND_STATUS;
824 if (!mmc_host_is_spi(card->host))
825 cmd.arg = card->rca << 16;
826 cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
0a2d4048
RKAL
827 err = mmc_wait_for_cmd(card->host, &cmd, retries);
828 if (err == 0)
829 *status = cmd.resp[0];
830 return err;
504f191f
AH
831}
832
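/*
 * Poll CMD13 until the card signals READY_FOR_DATA and has left the
 * programming state, or until @timeout_ms expires. If the host supports
 * MMC_CAP_WAIT_WHILE_BUSY and @hw_busy_detect is set, polling is skipped.
 */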
c49433fb 833static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
c44d6cef 834 bool hw_busy_detect, struct request *req, bool *gen_err)
c49433fb
UH
835{
836 unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
837 int err = 0;
838 u32 status;
839
840 do {
841 err = get_card_status(card, &status, 5);
842 if (err) {
843 pr_err("%s: error %d requesting status\n",
844 req->rq_disk->disk_name, err);
845 return err;
846 }
847
848 if (status & R1_ERROR) {
849 pr_err("%s: %s: error sending status cmd, status %#x\n",
850 req->rq_disk->disk_name, __func__, status);
c44d6cef 851 *gen_err = true;
c49433fb
UH
852 }
853
95a91298
UH
 854 /* We may rely on the host hw to handle busy detection. */
855 if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) &&
856 hw_busy_detect)
857 break;
858
c49433fb
UH
859 /*
860 * Timeout if the device never becomes ready for data and never
861 * leaves the program state.
862 */
863 if (time_after(jiffies, timeout)) {
864 pr_err("%s: Card stuck in programming state! %s %s\n",
865 mmc_hostname(card->host),
866 req->rq_disk->disk_name, __func__);
867 return -ETIMEDOUT;
868 }
869
870 /*
871 * Some cards mishandle the status bits,
872 * so make sure to check both the busy
873 * indication and the card state.
874 */
875 } while (!(status & R1_READY_FOR_DATA) ||
876 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
877
878 return err;
879}
880
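/*
 * Send CMD12 (STOP_TRANSMISSION), using an R1B response with a busy timeout
 * only when the host can honour it, and for writes wait for the card to
 * become ready again via card_busy_detect().
 */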
bb5cba40 881static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
c44d6cef 882 struct request *req, bool *gen_err, u32 *stop_status)
bb5cba40
UH
883{
884 struct mmc_host *host = card->host;
c7836d15 885 struct mmc_command cmd = {};
bb5cba40
UH
886 int err;
887 bool use_r1b_resp = rq_data_dir(req) == WRITE;
888
889 /*
890 * Normally we use R1B responses for WRITE, but in cases where the host
891 * has specified a max_busy_timeout we need to validate it. A failure
892 * means we need to prevent the host from doing hw busy detection, which
893 * is done by converting to a R1 response instead.
894 */
895 if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
896 use_r1b_resp = false;
897
898 cmd.opcode = MMC_STOP_TRANSMISSION;
899 if (use_r1b_resp) {
900 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
901 cmd.busy_timeout = timeout_ms;
902 } else {
903 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
904 }
905
906 err = mmc_wait_for_cmd(host, &cmd, 5);
907 if (err)
908 return err;
909
910 *stop_status = cmd.resp[0];
911
912 /* No need to check card status in case of READ. */
913 if (rq_data_dir(req) == READ)
914 return 0;
915
916 if (!mmc_host_is_spi(host) &&
917 (*stop_status & R1_ERROR)) {
918 pr_err("%s: %s: general error sending stop command, resp %#x\n",
919 req->rq_disk->disk_name, __func__, *stop_status);
c44d6cef 920 *gen_err = true;
bb5cba40
UH
921 }
922
923 return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
924}
925
a8ad82cc 926#define ERR_NOMEDIUM 3
a01f3ccf
RKAL
927#define ERR_RETRY 2
928#define ERR_ABORT 1
929#define ERR_CONTINUE 0
930
931static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
932 bool status_valid, u32 status)
933{
934 switch (error) {
935 case -EILSEQ:
936 /* response crc error, retry the r/w cmd */
937 pr_err("%s: %s sending %s command, card status %#x\n",
938 req->rq_disk->disk_name, "response CRC error",
939 name, status);
940 return ERR_RETRY;
941
942 case -ETIMEDOUT:
943 pr_err("%s: %s sending %s command, card status %#x\n",
944 req->rq_disk->disk_name, "timed out", name, status);
945
946 /* If the status cmd initially failed, retry the r/w cmd */
cc4d04be
KS
947 if (!status_valid) {
948 pr_err("%s: status not valid, retrying timeout\n",
949 req->rq_disk->disk_name);
a01f3ccf 950 return ERR_RETRY;
cc4d04be 951 }
a01f3ccf
RKAL
952
953 /*
954 * If it was a r/w cmd crc error, or illegal command
955 * (eg, issued in wrong state) then retry - we should
956 * have corrected the state problem above.
957 */
cc4d04be
KS
958 if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
959 pr_err("%s: command error, retrying timeout\n",
960 req->rq_disk->disk_name);
a01f3ccf 961 return ERR_RETRY;
cc4d04be 962 }
a01f3ccf
RKAL
963
964 /* Otherwise abort the command */
965 return ERR_ABORT;
966
967 default:
968 /* We don't understand the error code the driver gave us */
969 pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
970 req->rq_disk->disk_name, error, status);
971 return ERR_ABORT;
972 }
973}
974
975/*
976 * Initial r/w and stop cmd error recovery.
977 * We don't know whether the card received the r/w cmd or not, so try to
978 * restore things back to a sane state. Essentially, we do this as follows:
979 * - Obtain card status. If the first attempt to obtain card status fails,
980 * the status word will reflect the failed status cmd, not the failed
981 * r/w cmd. If we fail to obtain card status, it suggests we can no
982 * longer communicate with the card.
983 * - Check the card state. If the card received the cmd but there was a
984 * transient problem with the response, it might still be in a data transfer
985 * mode. Try to send it a stop command. If this fails, we can't recover.
986 * - If the r/w cmd failed due to a response CRC error, it was probably
987 * transient, so retry the cmd.
988 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
989 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
990 * illegal cmd, retry.
991 * Otherwise we don't understand what happened, so abort.
992 */
993static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
2cc64587 994 struct mmc_blk_request *brq, bool *ecc_err, bool *gen_err)
a01f3ccf
RKAL
995{
996 bool prev_cmd_status_valid = true;
997 u32 status, stop_status = 0;
998 int err, retry;
999
a8ad82cc
SRT
1000 if (mmc_card_removed(card))
1001 return ERR_NOMEDIUM;
1002
a01f3ccf
RKAL
1003 /*
1004 * Try to get card status which indicates both the card state
1005 * and why there was no response. If the first attempt fails,
1006 * we can't be sure the returned status is for the r/w command.
1007 */
1008 for (retry = 2; retry >= 0; retry--) {
1009 err = get_card_status(card, &status, 0);
1010 if (!err)
1011 break;
1012
6f398ad2
AH
1013 /* Re-tune if needed */
1014 mmc_retune_recheck(card->host);
1015
a01f3ccf
RKAL
1016 prev_cmd_status_valid = false;
1017 pr_err("%s: error %d sending status command, %sing\n",
1018 req->rq_disk->disk_name, err, retry ? "retry" : "abort");
1019 }
1020
1021 /* We couldn't get a response from the card. Give up. */
a8ad82cc
SRT
1022 if (err) {
1023 /* Check if the card is removed */
1024 if (mmc_detect_card_removed(card->host))
1025 return ERR_NOMEDIUM;
a01f3ccf 1026 return ERR_ABORT;
a8ad82cc 1027 }
a01f3ccf 1028
67716327
AH
1029 /* Flag ECC errors */
1030 if ((status & R1_CARD_ECC_FAILED) ||
1031 (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
1032 (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
2cc64587 1033 *ecc_err = true;
67716327 1034
c8760069
KY
1035 /* Flag General errors */
1036 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
1037 if ((status & R1_ERROR) ||
1038 (brq->stop.resp[0] & R1_ERROR)) {
1039 pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
1040 req->rq_disk->disk_name, __func__,
1041 brq->stop.resp[0], status);
c44d6cef 1042 *gen_err = true;
c8760069
KY
1043 }
1044
a01f3ccf
RKAL
1045 /*
1046 * Check the current card state. If it is in some data transfer
1047 * mode, tell it to stop (and hopefully transition back to TRAN.)
1048 */
1049 if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
1050 R1_CURRENT_STATE(status) == R1_STATE_RCV) {
bb5cba40
UH
1051 err = send_stop(card,
1052 DIV_ROUND_UP(brq->data.timeout_ns, 1000000),
1053 req, gen_err, &stop_status);
1054 if (err) {
a01f3ccf
RKAL
1055 pr_err("%s: error %d sending stop command\n",
1056 req->rq_disk->disk_name, err);
bb5cba40
UH
1057 /*
1058 * If the stop cmd also timed out, the card is probably
1059 * not present, so abort. Other errors are bad news too.
1060 */
a01f3ccf 1061 return ERR_ABORT;
bb5cba40
UH
1062 }
1063
67716327 1064 if (stop_status & R1_CARD_ECC_FAILED)
2cc64587 1065 *ecc_err = true;
a01f3ccf
RKAL
1066 }
1067
1068 /* Check for set block count errors */
1069 if (brq->sbc.error)
1070 return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
1071 prev_cmd_status_valid, status);
1072
1073 /* Check for r/w command errors */
1074 if (brq->cmd.error)
1075 return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
1076 prev_cmd_status_valid, status);
1077
67716327
AH
1078 /* Data errors */
1079 if (!brq->stop.error)
1080 return ERR_CONTINUE;
1081
a01f3ccf 1082 /* Now for stop errors. These aren't fatal to the transfer. */
5e1344eb 1083 pr_info("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
a01f3ccf
RKAL
1084 req->rq_disk->disk_name, brq->stop.error,
1085 brq->cmd.resp[0], status);
1086
1087 /*
 1088 * Substitute in our own stop status as this will give the error
1089 * state which happened during the execution of the r/w command.
1090 */
1091 if (stop_status) {
1092 brq->stop.resp[0] = stop_status;
1093 brq->stop.error = 0;
1094 }
1095 return ERR_CONTINUE;
1096}
1097
67716327
AH
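/*
 * Reset the host/card as part of error recovery. The @type bit in
 * reset_done prevents a second reset for the same kind of failure until
 * mmc_blk_reset_success() clears it; the current partition is restored
 * after the reset.
 */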
1098static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
1099 int type)
1100{
1101 int err;
1102
1103 if (md->reset_done & type)
1104 return -EEXIST;
1105
1106 md->reset_done |= type;
1107 err = mmc_hw_reset(host);
1108 /* Ensure we switch back to the correct partition */
1109 if (err != -EOPNOTSUPP) {
fc95e30b
UH
1110 struct mmc_blk_data *main_md =
1111 dev_get_drvdata(&host->card->dev);
67716327
AH
1112 int part_err;
1113
1114 main_md->part_curr = main_md->part_type;
1115 part_err = mmc_blk_part_switch(host->card, md);
1116 if (part_err) {
1117 /*
1118 * We have failed to get back into the correct
1119 * partition, so we need to abort the whole request.
1120 */
1121 return -ENODEV;
1122 }
1123 }
1124 return err;
1125}
1126
1127static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
1128{
1129 md->reset_done &= ~type;
1130}
1131
4e93b9a6
CD
1132int mmc_access_rpmb(struct mmc_queue *mq)
1133{
7db3028e 1134 struct mmc_blk_data *md = mq->blkdata;
4e93b9a6
CD
1135 /*
 1136 * If this is an RPMB partition access, return true
1137 */
1138 if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
1139 return true;
1140
1141 return false;
1142}
1143
df061588 1144static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
bd788c96 1145{
7db3028e 1146 struct mmc_blk_data *md = mq->blkdata;
bd788c96
AH
1147 struct mmc_card *card = md->queue.card;
1148 unsigned int from, nr, arg;
67716327 1149 int err = 0, type = MMC_BLK_DISCARD;
bd788c96 1150
bd788c96
AH
1151 if (!mmc_can_erase(card)) {
1152 err = -EOPNOTSUPP;
8cb6ed17 1153 goto fail;
bd788c96
AH
1154 }
1155
1156 from = blk_rq_pos(req);
1157 nr = blk_rq_sectors(req);
1158
b3bf9153
KP
1159 if (mmc_can_discard(card))
1160 arg = MMC_DISCARD_ARG;
1161 else if (mmc_can_trim(card))
bd788c96
AH
1162 arg = MMC_TRIM_ARG;
1163 else
1164 arg = MMC_ERASE_ARG;
164b50b3
GU
1165 do {
1166 err = 0;
1167 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1168 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1169 INAND_CMD38_ARG_EXT_CSD,
1170 arg == MMC_TRIM_ARG ?
1171 INAND_CMD38_ARG_TRIM :
1172 INAND_CMD38_ARG_ERASE,
1173 0);
1174 }
1175 if (!err)
1176 err = mmc_erase(card, from, nr, arg);
1177 } while (err == -EIO && !mmc_blk_reset(md, card->host, type));
67716327
AH
1178 if (!err)
1179 mmc_blk_reset_success(md, type);
8cb6ed17 1180fail:
ecf8b5d0 1181 blk_end_request(req, err, blk_rq_bytes(req));
bd788c96
AH
1182}
1183
df061588 1184static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
49804548
AH
1185 struct request *req)
1186{
7db3028e 1187 struct mmc_blk_data *md = mq->blkdata;
49804548 1188 struct mmc_card *card = md->queue.card;
775a9362 1189 unsigned int from, nr, arg;
67716327 1190 int err = 0, type = MMC_BLK_SECDISCARD;
49804548 1191
775a9362 1192 if (!(mmc_can_secure_erase_trim(card))) {
49804548
AH
1193 err = -EOPNOTSUPP;
1194 goto out;
1195 }
1196
28302812
AH
1197 from = blk_rq_pos(req);
1198 nr = blk_rq_sectors(req);
1199
775a9362
ME
1200 if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
1201 arg = MMC_SECURE_TRIM1_ARG;
1202 else
1203 arg = MMC_SECURE_ERASE_ARG;
d9ddd629 1204
67716327 1205retry:
6a7a6b45
AW
1206 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1207 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1208 INAND_CMD38_ARG_EXT_CSD,
1209 arg == MMC_SECURE_TRIM1_ARG ?
1210 INAND_CMD38_ARG_SECTRIM1 :
1211 INAND_CMD38_ARG_SECERASE,
1212 0);
1213 if (err)
28302812 1214 goto out_retry;
6a7a6b45 1215 }
28302812 1216
49804548 1217 err = mmc_erase(card, from, nr, arg);
28302812
AH
1218 if (err == -EIO)
1219 goto out_retry;
1220 if (err)
1221 goto out;
1222
1223 if (arg == MMC_SECURE_TRIM1_ARG) {
6a7a6b45
AW
1224 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1225 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1226 INAND_CMD38_ARG_EXT_CSD,
1227 INAND_CMD38_ARG_SECTRIM2,
1228 0);
1229 if (err)
28302812 1230 goto out_retry;
6a7a6b45 1231 }
28302812 1232
49804548 1233 err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
28302812
AH
1234 if (err == -EIO)
1235 goto out_retry;
1236 if (err)
1237 goto out;
6a7a6b45 1238 }
28302812 1239
28302812
AH
1240out_retry:
1241 if (err && !mmc_blk_reset(md, card->host, type))
67716327
AH
1242 goto retry;
1243 if (!err)
1244 mmc_blk_reset_success(md, type);
28302812 1245out:
ecf8b5d0 1246 blk_end_request(req, err, blk_rq_bytes(req));
49804548
AH
1247}
1248
df061588 1249static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
f4c5522b 1250{
7db3028e 1251 struct mmc_blk_data *md = mq->blkdata;
881d1c25
SJ
1252 struct mmc_card *card = md->queue.card;
1253 int ret = 0;
1254
1255 ret = mmc_flush_cache(card);
1256 if (ret)
1257 ret = -EIO;
f4c5522b 1258
ecf8b5d0 1259 blk_end_request_all(req, ret);
f4c5522b
AW
1260}
1261
1262/*
1263 * Reformat current write as a reliable write, supporting
1264 * both legacy and the enhanced reliable write MMC cards.
1265 * In each transfer we'll handle only as much as a single
1266 * reliable write can handle, thus finish the request in
1267 * partial completions.
1268 */
d0c97cfb
AW
1269static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
1270 struct mmc_card *card,
1271 struct request *req)
f4c5522b 1272{
f4c5522b
AW
1273 if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
1274 /* Legacy mode imposes restrictions on transfers. */
1275 if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
1276 brq->data.blocks = 1;
1277
1278 if (brq->data.blocks > card->ext_csd.rel_sectors)
1279 brq->data.blocks = card->ext_csd.rel_sectors;
1280 else if (brq->data.blocks < card->ext_csd.rel_sectors)
1281 brq->data.blocks = 1;
1282 }
f4c5522b
AW
1283}
1284
4c2b8f26
RKAL
1285#define CMD_ERRORS \
1286 (R1_OUT_OF_RANGE | /* Command argument out of range */ \
1287 R1_ADDRESS_ERROR | /* Misaligned address */ \
1288 R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\
1289 R1_WP_VIOLATION | /* Tried to write to protected block */ \
1290 R1_CC_ERROR | /* Card controller error */ \
1291 R1_ERROR) /* General/unknown error */
1292
8e8b3f51
LW
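/*
 * Post-process a completed asynchronous transfer and map the outcome to an
 * mmc_blk_status: success/partial, retry, abort, ECC/data error or "no
 * medium", after running the command error recovery above where needed.
 */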
1293static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
1294 struct mmc_async_req *areq)
d78d4a8a 1295{
ee8a43a5 1296 struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
74f5ba35 1297 areq);
ee8a43a5
PF
1298 struct mmc_blk_request *brq = &mq_mrq->brq;
1299 struct request *req = mq_mrq->req;
b8360a49 1300 int need_retune = card->host->need_retune;
2cc64587 1301 bool ecc_err = false;
c44d6cef 1302 bool gen_err = false;
d78d4a8a
PF
1303
1304 /*
1305 * sbc.error indicates a problem with the set block count
1306 * command. No data will have been transferred.
1307 *
1308 * cmd.error indicates a problem with the r/w command. No
1309 * data will have been transferred.
1310 *
1311 * stop.error indicates a problem with the stop command. Data
1312 * may have been transferred, or may still be transferring.
1313 */
67716327
AH
1314 if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
1315 brq->data.error) {
c8760069 1316 switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
d78d4a8a
PF
1317 case ERR_RETRY:
1318 return MMC_BLK_RETRY;
1319 case ERR_ABORT:
1320 return MMC_BLK_ABORT;
a8ad82cc
SRT
1321 case ERR_NOMEDIUM:
1322 return MMC_BLK_NOMEDIUM;
d78d4a8a
PF
1323 case ERR_CONTINUE:
1324 break;
1325 }
1326 }
1327
1328 /*
1329 * Check for errors relating to the execution of the
1330 * initial command - such as address errors. No data
1331 * has been transferred.
1332 */
1333 if (brq->cmd.resp[0] & CMD_ERRORS) {
1334 pr_err("%s: r/w command failed, status = %#x\n",
1335 req->rq_disk->disk_name, brq->cmd.resp[0]);
1336 return MMC_BLK_ABORT;
1337 }
1338
1339 /*
1340 * Everything else is either success, or a data error of some
1341 * kind. If it was a write, we may have transitioned to
 1342 * program mode, which we have to wait for to complete.
1343 */
1344 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
c49433fb 1345 int err;
8fee476b 1346
c8760069
KY
1347 /* Check stop command response */
1348 if (brq->stop.resp[0] & R1_ERROR) {
1349 pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
1350 req->rq_disk->disk_name, __func__,
1351 brq->stop.resp[0]);
c44d6cef 1352 gen_err = true;
c8760069
KY
1353 }
1354
95a91298
UH
1355 err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
1356 &gen_err);
c49433fb
UH
1357 if (err)
1358 return MMC_BLK_CMD_ERR;
d78d4a8a
PF
1359 }
1360
c8760069
KY
1361 /* if general error occurs, retry the write operation. */
1362 if (gen_err) {
1363 pr_warn("%s: retrying write for general error\n",
1364 req->rq_disk->disk_name);
1365 return MMC_BLK_RETRY;
1366 }
1367
d78d4a8a 1368 if (brq->data.error) {
b8360a49 1369 if (need_retune && !brq->retune_retry_done) {
09faf61d
RK
1370 pr_debug("%s: retrying because a re-tune was needed\n",
1371 req->rq_disk->disk_name);
b8360a49
AH
1372 brq->retune_retry_done = 1;
1373 return MMC_BLK_RETRY;
1374 }
d78d4a8a
PF
1375 pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
1376 req->rq_disk->disk_name, brq->data.error,
1377 (unsigned)blk_rq_pos(req),
1378 (unsigned)blk_rq_sectors(req),
1379 brq->cmd.resp[0], brq->stop.resp[0]);
1380
1381 if (rq_data_dir(req) == READ) {
67716327
AH
1382 if (ecc_err)
1383 return MMC_BLK_ECC_ERR;
d78d4a8a
PF
1384 return MMC_BLK_DATA_ERR;
1385 } else {
1386 return MMC_BLK_CMD_ERR;
1387 }
1388 }
1389
67716327
AH
1390 if (!brq->data.bytes_xfered)
1391 return MMC_BLK_RETRY;
d78d4a8a 1392
67716327
AH
1393 if (blk_rq_bytes(req) != brq->data.bytes_xfered)
1394 return MMC_BLK_PARTIAL;
1395
1396 return MMC_BLK_SUCCESS;
d78d4a8a
PF
1397}
1398
54d49d77
PF
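/*
 * Translate a block layer request into an mmc_blk_request: choose single
 * vs. multi-block read/write opcodes, clamp the block count, set up the
 * scatterlist, and add CMD23/reliable-write/data-tag handling when card
 * and host support it.
 */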
1399static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
1400 struct mmc_card *card,
1401 int disable_multi,
1402 struct mmc_queue *mq)
1da177e4 1403{
54d49d77
PF
1404 u32 readcmd, writecmd;
1405 struct mmc_blk_request *brq = &mqrq->brq;
1406 struct request *req = mqrq->req;
7db3028e 1407 struct mmc_blk_data *md = mq->blkdata;
4265900e 1408 bool do_data_tag;
1da177e4 1409
f4c5522b
AW
1410 /*
1411 * Reliable writes are used to implement Forced Unit Access and
d3df0465 1412 * are supported only on MMCs.
f4c5522b 1413 */
d3df0465 1414 bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
f4c5522b 1415 (rq_data_dir(req) == WRITE) &&
d0c97cfb 1416 (md->flags & MMC_BLK_REL_WR);
f4c5522b 1417
54d49d77
PF
1418 memset(brq, 0, sizeof(struct mmc_blk_request));
1419 brq->mrq.cmd = &brq->cmd;
1420 brq->mrq.data = &brq->data;
1da177e4 1421
54d49d77
PF
1422 brq->cmd.arg = blk_rq_pos(req);
1423 if (!mmc_card_blockaddr(card))
1424 brq->cmd.arg <<= 9;
1425 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
1426 brq->data.blksz = 512;
1427 brq->stop.opcode = MMC_STOP_TRANSMISSION;
1428 brq->stop.arg = 0;
54d49d77 1429 brq->data.blocks = blk_rq_sectors(req);
6a79e391 1430
54d49d77
PF
1431 /*
1432 * The block layer doesn't support all sector count
1433 * restrictions, so we need to be prepared for too big
1434 * requests.
1435 */
1436 if (brq->data.blocks > card->host->max_blk_count)
1437 brq->data.blocks = card->host->max_blk_count;
1da177e4 1438
2bf22b39
PW
1439 if (brq->data.blocks > 1) {
1440 /*
1441 * After a read error, we redo the request one sector
1442 * at a time in order to accurately determine which
1443 * sectors can be read successfully.
1444 */
1445 if (disable_multi)
1446 brq->data.blocks = 1;
1447
2e47e842
KM
1448 /*
1449 * Some controllers have HW issues while operating
1450 * in multiple I/O mode
1451 */
1452 if (card->host->ops->multi_io_quirk)
1453 brq->data.blocks = card->host->ops->multi_io_quirk(card,
1454 (rq_data_dir(req) == READ) ?
1455 MMC_DATA_READ : MMC_DATA_WRITE,
1456 brq->data.blocks);
2bf22b39 1457 }
d0c97cfb 1458
54d49d77
PF
1459 if (brq->data.blocks > 1 || do_rel_wr) {
1460 /* SPI multiblock writes terminate using a special
1461 * token, not a STOP_TRANSMISSION request.
d0c97cfb 1462 */
54d49d77
PF
1463 if (!mmc_host_is_spi(card->host) ||
1464 rq_data_dir(req) == READ)
1465 brq->mrq.stop = &brq->stop;
1466 readcmd = MMC_READ_MULTIPLE_BLOCK;
1467 writecmd = MMC_WRITE_MULTIPLE_BLOCK;
1468 } else {
1469 brq->mrq.stop = NULL;
1470 readcmd = MMC_READ_SINGLE_BLOCK;
1471 writecmd = MMC_WRITE_BLOCK;
1472 }
1473 if (rq_data_dir(req) == READ) {
1474 brq->cmd.opcode = readcmd;
f53f1102 1475 brq->data.flags = MMC_DATA_READ;
bcc3e172
UH
1476 if (brq->mrq.stop)
1477 brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 |
1478 MMC_CMD_AC;
54d49d77
PF
1479 } else {
1480 brq->cmd.opcode = writecmd;
f53f1102 1481 brq->data.flags = MMC_DATA_WRITE;
bcc3e172
UH
1482 if (brq->mrq.stop)
1483 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B |
1484 MMC_CMD_AC;
54d49d77 1485 }
d0c97cfb 1486
54d49d77
PF
1487 if (do_rel_wr)
1488 mmc_apply_rel_rw(brq, card, req);
f4c5522b 1489
4265900e
SD
1490 /*
 1491 * Data tag is used only when writing meta data, to speed up
 1492 * the write and any subsequent reads of this meta data
1493 */
1494 do_data_tag = (card->ext_csd.data_tag_unit_size) &&
1495 (req->cmd_flags & REQ_META) &&
1496 (rq_data_dir(req) == WRITE) &&
1497 ((brq->data.blocks * brq->data.blksz) >=
1498 card->ext_csd.data_tag_unit_size);
1499
54d49d77
PF
1500 /*
1501 * Pre-defined multi-block transfers are preferable to
 1502 * open-ended ones (and necessary for reliable writes).
1503 * However, it is not sufficient to just send CMD23,
1504 * and avoid the final CMD12, as on an error condition
1505 * CMD12 (stop) needs to be sent anyway. This, coupled
1506 * with Auto-CMD23 enhancements provided by some
1507 * hosts, means that the complexity of dealing
1508 * with this is best left to the host. If CMD23 is
1509 * supported by card and host, we'll fill sbc in and let
1510 * the host deal with handling it correctly. This means
1511 * that for hosts that don't expose MMC_CAP_CMD23, no
1512 * change of behavior will be observed.
1513 *
1514 * N.B: Some MMC cards experience perf degradation.
1515 * We'll avoid using CMD23-bounded multiblock writes for
1516 * these, while retaining features like reliable writes.
1517 */
4265900e
SD
1518 if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
1519 (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
1520 do_data_tag)) {
54d49d77
PF
1521 brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
1522 brq->sbc.arg = brq->data.blocks |
4265900e
SD
1523 (do_rel_wr ? (1 << 31) : 0) |
1524 (do_data_tag ? (1 << 29) : 0);
54d49d77
PF
1525 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
1526 brq->mrq.sbc = &brq->sbc;
1527 }
98ccf149 1528
54d49d77
PF
1529 mmc_set_data_timeout(&brq->data, card);
1530
1531 brq->data.sg = mqrq->sg;
1532 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
1533
1534 /*
1535 * Adjust the sg list so it is the same size as the
1536 * request.
1537 */
1538 if (brq->data.blocks != blk_rq_sectors(req)) {
1539 int i, data_size = brq->data.blocks << 9;
1540 struct scatterlist *sg;
1541
1542 for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
1543 data_size -= sg->length;
1544 if (data_size <= 0) {
1545 sg->length += data_size;
1546 i++;
1547 break;
6a79e391 1548 }
6a79e391 1549 }
54d49d77
PF
1550 brq->data.sg_len = i;
1551 }
1552
74f5ba35
LW
1553 mqrq->areq.mrq = &brq->mrq;
1554 mqrq->areq.err_check = mmc_blk_err_check;
ee8a43a5 1555
54d49d77
PF
1556 mmc_queue_bounce_pre(mqrq);
1557}
6a79e391 1558
0e65f10c
LW
1559static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
1560 struct mmc_blk_request *brq, struct request *req,
1561 bool old_req_pending)
67716327 1562{
0e65f10c
LW
1563 bool req_pending;
1564
67716327
AH
1565 /*
1566 * If this is an SD card and we're writing, we can first
1567 * mark the known good sectors as ok.
1568 *
1569 * If the card is not SD, we can still ok written sectors
1570 * as reported by the controller (which might be less than
1571 * the real number of written sectors, but never more).
1572 */
1573 if (mmc_card_sd(card)) {
1574 u32 blocks;
169f03a0 1575 int err;
67716327 1576
169f03a0 1577 err = mmc_sd_num_wr_blocks(card, &blocks);
0e65f10c
LW
1578 if (err)
1579 req_pending = old_req_pending;
1580 else
1581 req_pending = blk_end_request(req, 0, blocks << 9);
5dd784d2 1582 } else {
0e65f10c 1583 req_pending = blk_end_request(req, 0, brq->data.bytes_xfered);
ce39f9d1 1584 }
0e65f10c 1585 return req_pending;
ce39f9d1
SJ
1586}
1587
4e1f7800
LW
1588static void mmc_blk_rw_cmd_abort(struct mmc_card *card, struct request *req)
1589{
4e1f7800
LW
1590 if (mmc_card_removed(card))
1591 req->rq_flags |= RQF_QUIET;
0e65f10c 1592 while (blk_end_request(req, -EIO, blk_rq_cur_bytes(req)));
4e1f7800
LW
1593}
1594
b2928e10
LW
1595/**
1596 * mmc_blk_rw_try_restart() - tries to restart the current async request
1597 * @mq: the queue with the card and host to restart
1598 * @req: a new request that want to be started after the current one
1599 */
1600static void mmc_blk_rw_try_restart(struct mmc_queue *mq, struct request *req)
efb5a05e
LW
1601{
1602 if (!req)
1603 return;
1604
b2928e10
LW
1605 /*
1606 * If the card was removed, just cancel everything and return.
1607 */
1608 if (mmc_card_removed(mq->card)) {
efb5a05e
LW
1609 req->rq_flags |= RQF_QUIET;
1610 blk_end_request_all(req, -EIO);
b2928e10 1611 return;
efb5a05e 1612 }
b2928e10
LW
1613 /* Else proceed and try to restart the current async request */
1614 mmc_blk_rw_rq_prep(mq->mqrq_cur, mq->card, 0, mq);
74f5ba35 1615 mmc_start_areq(mq->card->host, &mq->mqrq_cur->areq, NULL);
efb5a05e
LW
1616}
1617
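/*
 * Main read/write engine: feed the new request into the asynchronous
 * pipeline, then handle the status of the request that just completed,
 * retrying, falling back to single-block I/O or aborting as
 * mmc_blk_err_check() dictates.
 */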
acd8dbd6 1618static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
54d49d77 1619{
7db3028e 1620 struct mmc_blk_data *md = mq->blkdata;
54d49d77 1621 struct mmc_card *card = md->queue.card;
5be80375 1622 struct mmc_blk_request *brq;
0e65f10c 1623 int disable_multi = 0, retry = 0, type, retune_retry_done = 0;
d78d4a8a 1624 enum mmc_blk_status status;
ee8a43a5 1625 struct mmc_queue_req *mq_rq;
acd8dbd6 1626 struct request *old_req;
7d552a48
LW
1627 struct mmc_async_req *new_areq;
1628 struct mmc_async_req *old_areq;
0e65f10c 1629 bool req_pending = true;
1da177e4 1630
acd8dbd6 1631 if (!new_req && !mq->mqrq_prev->req)
df061588 1632 return;
98ccf149 1633
ee8a43a5 1634 do {
acd8dbd6 1635 if (new_req) {
a5075eb9
SD
1636 /*
 1637 * When 4KB native sector size is enabled, only reads and
 1638 * writes that are a multiple of 8 blocks are allowed
1639 */
e87c8561 1640 if (mmc_large_sector(card) &&
acd8dbd6 1641 !IS_ALIGNED(blk_rq_sectors(new_req), 8)) {
a5075eb9 1642 pr_err("%s: Transfer size is not 4KB sector size aligned\n",
acd8dbd6
LW
1643 new_req->rq_disk->disk_name);
1644 mmc_blk_rw_cmd_abort(card, new_req);
df061588 1645 return;
a5075eb9 1646 }
ce39f9d1 1647
03d640ae 1648 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
74f5ba35 1649 new_areq = &mq->mqrq_cur->areq;
ee8a43a5 1650 } else
7d552a48
LW
1651 new_areq = NULL;
1652
c3399ef5 1653 old_areq = mmc_start_areq(card->host, new_areq, &status);
7d552a48 1654 if (!old_areq) {
da0dbaff
LW
1655 /*
1656 * We have just put the first request into the pipeline
1657 * and there is nothing more to do until it is
1658 * complete.
1659 */
2220eedf 1660 if (status == MMC_BLK_NEW_REQUEST)
9491be5f 1661 mq->new_request = true;
df061588 1662 return;
2220eedf 1663 }
ee8a43a5 1664
da0dbaff
LW
1665 /*
1666 * An asynchronous request has been completed and we proceed
1667 * to handle the result of it.
1668 */
74f5ba35 1669 mq_rq = container_of(old_areq, struct mmc_queue_req, areq);
ee8a43a5 1670 brq = &mq_rq->brq;
acd8dbd6
LW
1671 old_req = mq_rq->req;
1672 type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
ee8a43a5 1673 mmc_queue_bounce_post(mq_rq);
98ccf149 1674
d78d4a8a
PF
1675 switch (status) {
1676 case MMC_BLK_SUCCESS:
1677 case MMC_BLK_PARTIAL:
1678 /*
1679 * A block was successfully transferred.
1680 */
67716327 1681 mmc_blk_reset_success(md, type);
ce39f9d1 1682
0e65f10c
LW
1683 req_pending = blk_end_request(old_req, 0,
1684 brq->data.bytes_xfered);
67716327
AH
1685 /*
1686 * If the blk_end_request function returns non-zero even
1687 * though all data has been transferred and no errors
1688 * were returned by the host controller, it's a bug.
1689 */
0e65f10c 1690 if (status == MMC_BLK_SUCCESS && req_pending) {
a3c76eb9 1691 pr_err("%s BUG rq_tot %d d_xfer %d\n",
acd8dbd6 1692 __func__, blk_rq_bytes(old_req),
ee8a43a5 1693 brq->data.bytes_xfered);
acd8dbd6 1694 mmc_blk_rw_cmd_abort(card, old_req);
df061588 1695 return;
ee8a43a5 1696 }
d78d4a8a
PF
1697 break;
1698 case MMC_BLK_CMD_ERR:
0e65f10c 1699 req_pending = mmc_blk_rw_cmd_err(md, card, brq, old_req, req_pending);
db435505 1700 if (mmc_blk_reset(md, card->host, type)) {
8ecc3444
AH
1701 if (req_pending)
1702 mmc_blk_rw_cmd_abort(card, old_req);
b2928e10 1703 mmc_blk_rw_try_restart(mq, new_req);
db435505
LW
1704 return;
1705 }
0e65f10c 1706 if (!req_pending) {
b2928e10 1707 mmc_blk_rw_try_restart(mq, new_req);
db435505
LW
1708 return;
1709 }
29535f7b 1710 break;
d78d4a8a 1711 case MMC_BLK_RETRY:
b8360a49 1712 retune_retry_done = brq->retune_retry_done;
d78d4a8a 1713 if (retry++ < 5)
a01f3ccf 1714 break;
67716327 1715 /* Fall through */
d78d4a8a 1716 case MMC_BLK_ABORT:
67716327
AH
1717 if (!mmc_blk_reset(md, card->host, type))
1718 break;
acd8dbd6 1719 mmc_blk_rw_cmd_abort(card, old_req);
b2928e10 1720 mmc_blk_rw_try_restart(mq, new_req);
db435505 1721 return;
67716327
AH
1722 case MMC_BLK_DATA_ERR: {
1723 int err;
1724
1725 err = mmc_blk_reset(md, card->host, type);
1726 if (!err)
1727 break;
db435505 1728 if (err == -ENODEV) {
acd8dbd6 1729 mmc_blk_rw_cmd_abort(card, old_req);
b2928e10 1730 mmc_blk_rw_try_restart(mq, new_req);
db435505
LW
1731 return;
1732 }
67716327
AH
1733 /* Fall through */
1734 }
1735 case MMC_BLK_ECC_ERR:
1736 if (brq->data.blocks > 1) {
1737 /* Redo read one sector at a time */
6606110d 1738 pr_warn("%s: retrying using single block read\n",
acd8dbd6 1739 old_req->rq_disk->disk_name);
67716327
AH
1740 disable_multi = 1;
1741 break;
1742 }
d78d4a8a
PF
1743 /*
1744 * After an error, we redo I/O one sector at a
1745 * time, so we only reach here after trying to
1746 * read a single sector.
1747 */
0e65f10c
LW
1748 req_pending = blk_end_request(old_req, -EIO,
1749 brq->data.blksz);
1750 if (!req_pending) {
b2928e10 1751 mmc_blk_rw_try_restart(mq, new_req);
db435505
LW
1752 return;
1753 }
d78d4a8a 1754 break;
a8ad82cc 1755 case MMC_BLK_NOMEDIUM:
acd8dbd6 1756 mmc_blk_rw_cmd_abort(card, old_req);
b2928e10 1757 mmc_blk_rw_try_restart(mq, new_req);
db435505 1758 return;
2220eedf
KD
1759 default:
1760 pr_err("%s: Unhandled return value (%d)",
acd8dbd6
LW
1761 old_req->rq_disk->disk_name, status);
1762 mmc_blk_rw_cmd_abort(card, old_req);
b2928e10 1763 mmc_blk_rw_try_restart(mq, new_req);
db435505 1764 return;
4c2b8f26
RKAL
1765 }
1766
0e65f10c 1767 if (req_pending) {
03d640ae
LW
1768 /*
 1769 * In case of an incomplete request,
1770 * prepare it again and resend.
1771 */
1772 mmc_blk_rw_rq_prep(mq_rq, card,
1773 disable_multi, mq);
c3399ef5 1774 mmc_start_areq(card->host,
74f5ba35 1775 &mq_rq->areq, NULL);
b8360a49 1776 mq_rq->brq.retune_retry_done = retune_retry_done;
ee8a43a5 1777 }
0e65f10c 1778 } while (req_pending);
1da177e4
LT
1779}
1780
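/*
 * Top-level dispatcher called from the queue thread: claims the host,
 * selects the partition, and routes discard, secure erase, flush and
 * read/write requests to their handlers.
 */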
df061588 1781void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
bd788c96 1782{
1a258db6 1783 int ret;
7db3028e 1784 struct mmc_blk_data *md = mq->blkdata;
1a258db6 1785 struct mmc_card *card = md->queue.card;
869c5548 1786 bool req_is_special = mmc_req_is_special(req);
1a258db6 1787
ee8a43a5
PF
1788 if (req && !mq->mqrq_prev->req)
1789 /* claim host only for the first request */
e94cfef6 1790 mmc_get_card(card);
ee8a43a5 1791
371a689f
AW
1792 ret = mmc_blk_part_switch(card, md);
1793 if (ret) {
0d7d85ca 1794 if (req) {
ecf8b5d0 1795 blk_end_request_all(req, -EIO);
0d7d85ca 1796 }
371a689f
AW
1797 goto out;
1798 }
1a258db6 1799
9491be5f 1800 mq->new_request = false;
c2df40df 1801 if (req && req_op(req) == REQ_OP_DISCARD) {
ee8a43a5
PF
1802 /* complete ongoing async transfer before issuing discard */
1803 if (card->host->areq)
1804 mmc_blk_issue_rw_rq(mq, NULL);
df061588 1805 mmc_blk_issue_discard_rq(mq, req);
288dab8a
CH
1806 } else if (req && req_op(req) == REQ_OP_SECURE_ERASE) {
1807 /* complete ongoing async transfer before issuing secure erase */
1808 if (card->host->areq)
1809 mmc_blk_issue_rw_rq(mq, NULL);
df061588 1810 mmc_blk_issue_secdiscard_rq(mq, req);
3a5e02ce 1811 } else if (req && req_op(req) == REQ_OP_FLUSH) {
393f9a08
JC
1812 /* complete ongoing async transfer before issuing flush */
1813 if (card->host->areq)
1814 mmc_blk_issue_rw_rq(mq, NULL);
df061588 1815 mmc_blk_issue_flush(mq, req);
49804548 1816 } else {
df061588 1817 mmc_blk_issue_rw_rq(mq, req);
2602b740 1818 card->host->context_info.is_waiting_last_req = false;
49804548 1819 }
1a258db6 1820
371a689f 1821out:
9491be5f 1822 if ((!req && !mq->new_request) || req_is_special)
ef3a69c7
SJ
1823 /*
1824 * Release host when there are no more requests
1825 * and after a special request (discard, flush) is done.
1826 * For a special request, there is no reentry into
1827 * 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
1828 */
e94cfef6 1829 mmc_put_card(card);
bd788c96 1830}
1da177e4 1831
a6f6c96b
RK
1832static inline int mmc_blk_readonly(struct mmc_card *card)
1833{
1834 return mmc_card_readonly(card) ||
1835 !(card->csd.cmdclass & CCC_BLOCK_WRITE);
1836}
1837
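/*
 * Allocate the mmc_blk_data, gendisk and request queue backing one data
 * area of the card (main user area, boot, general purpose or RPMB
 * partition).  @size is the capacity in 512-byte sectors; @default_ro
 * selects the initial read-only state of the disk.
 */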
371a689f
AW
1838static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
1839 struct device *parent,
1840 sector_t size,
1841 bool default_ro,
add710ea
JR
1842 const char *subname,
1843 int area_type)
1da177e4
LT
1844{
1845 struct mmc_blk_data *md;
1846 int devidx, ret;
1847
a04848c7
HK
1848 devidx = ida_simple_get(&mmc_blk_ida, 0, max_devices, GFP_KERNEL);
1849 if (devidx < 0)
1850 return ERR_PTR(devidx);
1da177e4 1851
dd00cc48 1852 md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
a6f6c96b
RK
1853 if (!md) {
1854 ret = -ENOMEM;
1855 goto out;
1856 }
1da177e4 1857
add710ea
JR
1858 md->area_type = area_type;
1859
a6f6c96b
RK
1860 /*
1861 * Set the read-only status based on the supported commands
1862 * and the write protect switch.
1863 */
1864 md->read_only = mmc_blk_readonly(card);
1da177e4 1865
5e71b7a6 1866 md->disk = alloc_disk(perdev_minors);
a6f6c96b
RK
1867 if (md->disk == NULL) {
1868 ret = -ENOMEM;
1869 goto err_kfree;
1870 }
1da177e4 1871
a6f6c96b 1872 spin_lock_init(&md->lock);
371a689f 1873 INIT_LIST_HEAD(&md->part);
a6f6c96b 1874 md->usage = 1;
1da177e4 1875
d09408ad 1876 ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
a6f6c96b
RK
1877 if (ret)
1878 goto err_putdisk;
1da177e4 1879
7db3028e 1880 md->queue.blkdata = md;
d2b18394 1881
fe6b4c88 1882 md->disk->major = MMC_BLOCK_MAJOR;
5e71b7a6 1883 md->disk->first_minor = devidx * perdev_minors;
a6f6c96b
RK
1884 md->disk->fops = &mmc_bdops;
1885 md->disk->private_data = md;
1886 md->disk->queue = md->queue.queue;
307d8e6f 1887 md->parent = parent;
371a689f 1888 set_disk_ro(md->disk, md->read_only || default_ro);
382c55f8 1889 md->disk->flags = GENHD_FL_EXT_DEVT;
f5b4d71f 1890 if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
53d8f974 1891 md->disk->flags |= GENHD_FL_NO_PART_SCAN;
a6f6c96b
RK
1892
1893 /*
1894 * As discussed on lkml, GENHD_FL_REMOVABLE should:
1895 *
1896 * - be set for removable media with permanent block devices
1897 * - be unset for removable block devices with permanent media
1898 *
1899 * Since MMC block devices clearly fall under the second
1900 * case, we do not set GENHD_FL_REMOVABLE. Userspace
1901 * should use the block device creation/destruction hotplug
1902 * messages to tell when the card is present.
1903 */
1904
f06c9153 1905 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
9aaf3437 1906 "mmcblk%u%s", card->host->index, subname ? subname : "");
a6f6c96b 1907
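	/*
	 * Expose the card with its native logical block size: eMMC reports
	 * it in EXT_CSD (DATA_SECTOR_SIZE, i.e. 512 bytes or 4 KiB); all
	 * other card types use 512-byte logical blocks.
	 */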
a5075eb9
SD
1908 if (mmc_card_mmc(card))
1909 blk_queue_logical_block_size(md->queue.queue,
1910 card->ext_csd.data_sector_size);
1911 else
1912 blk_queue_logical_block_size(md->queue.queue, 512);
1913
371a689f 1914 set_capacity(md->disk, size);
d0c97cfb 1915
f0d89972 1916 if (mmc_host_cmd23(card->host)) {
0ed50abb
DG
1917 if ((mmc_card_mmc(card) &&
1918 card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
f0d89972
AW
1919 (mmc_card_sd(card) &&
1920 card->scr.cmds & SD_SCR_CMD23_SUPPORT))
1921 md->flags |= MMC_BLK_CMD23;
1922 }
d0c97cfb
AW
1923
1924 if (mmc_card_mmc(card) &&
1925 md->flags & MMC_BLK_CMD23 &&
1926 ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
1927 card->ext_csd.rel_sectors)) {
1928 md->flags |= MMC_BLK_REL_WR;
e9d5c746 1929 blk_queue_write_cache(md->queue.queue, true, true);
d0c97cfb
AW
1930 }
1931
371a689f
AW
1932 return md;
1933
1934 err_putdisk:
1935 put_disk(md->disk);
1936 err_kfree:
1937 kfree(md);
1938 out:
a04848c7 1939 ida_simple_remove(&mmc_blk_ida, devidx);
371a689f
AW
1940 return ERR_PTR(ret);
1941}
1942
1943static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
1944{
1945 sector_t size;
a6f6c96b 1946
85a18ad9
PO
1947 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
1948 /*
1949 * The EXT_CSD sector count is in units of 512 byte
1950 * sectors.
1951 */
371a689f 1952 size = card->ext_csd.sectors;
85a18ad9
PO
1953 } else {
1954 /*
1955 * The CSD capacity field is in units of read_blkbits.
1956 * set_capacity takes units of 512 bytes.
1957 */
087de9ed
KM
1958 size = (typeof(sector_t))card->csd.capacity
1959 << (card->csd.read_blkbits - 9);
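		/*
		 * Illustrative: with read_blkbits == 10 (1 KiB read blocks)
		 * this is size = capacity << 1, i.e. two 512-byte sectors
		 * per CSD capacity unit.
		 */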
85a18ad9 1960 }
371a689f 1961
7a30f2af 1962 return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
add710ea 1963 MMC_BLK_DATA_AREA_MAIN);
371a689f 1964}
a6f6c96b 1965
371a689f
AW
1966static int mmc_blk_alloc_part(struct mmc_card *card,
1967 struct mmc_blk_data *md,
1968 unsigned int part_type,
1969 sector_t size,
1970 bool default_ro,
add710ea
JR
1971 const char *subname,
1972 int area_type)
371a689f
AW
1973{
1974 char cap_str[10];
1975 struct mmc_blk_data *part_md;
1976
1977 part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
add710ea 1978 subname, area_type);
371a689f
AW
1979 if (IS_ERR(part_md))
1980 return PTR_ERR(part_md);
1981 part_md->part_type = part_type;
1982 list_add(&part_md->part, &md->part);
1983
b9f28d86 1984 string_get_size((u64)get_capacity(part_md->disk), 512, STRING_UNITS_2,
371a689f 1985 cap_str, sizeof(cap_str));
a3c76eb9 1986 pr_info("%s: %s %s partition %u %s\n",
371a689f
AW
1987 part_md->disk->disk_name, mmc_card_id(card),
1988 mmc_card_name(card), part_md->part_type, cap_str);
1989 return 0;
1990}
1991
e0c368d5
NJ
1992/* MMC Physical partitions consist of two boot partitions and
1993 * up to four general purpose partitions.
1994 * For each partition enabled in EXT_CSD a block device will be allocated
1995 * to provide access to the partition.
1996 */
1997
371a689f
AW
1998static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
1999{
e0c368d5 2000 int idx, ret = 0;
371a689f
AW
2001
2002 if (!mmc_card_mmc(card))
2003 return 0;
2004
e0c368d5
NJ
2005 for (idx = 0; idx < card->nr_parts; idx++) {
2006 if (card->part[idx].size) {
2007 ret = mmc_blk_alloc_part(card, md,
2008 card->part[idx].part_cfg,
2009 card->part[idx].size >> 9,
2010 card->part[idx].force_ro,
add710ea
JR
2011 card->part[idx].name,
2012 card->part[idx].area_type);
e0c368d5
NJ
2013 if (ret)
2014 return ret;
2015 }
371a689f
AW
2016 }
2017
2018 return ret;
1da177e4
LT
2019}
2020
371a689f
AW
2021static void mmc_blk_remove_req(struct mmc_blk_data *md)
2022{
add710ea
JR
2023 struct mmc_card *card;
2024
371a689f 2025 if (md) {
fdfa20c1
PT
2026 /*
2027 * Flush remaining requests and free queues. It
2028 * is freeing the queue that stops new requests
2029 * from being accepted.
2030 */
8efb83a2 2031 card = md->queue.card;
fdfa20c1 2032 mmc_cleanup_queue(&md->queue);
371a689f
AW
2033 if (md->disk->flags & GENHD_FL_UP) {
2034 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
add710ea
JR
2035 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
2036 card->ext_csd.boot_ro_lockable)
2037 device_remove_file(disk_to_dev(md->disk),
2038 &md->power_ro_lock);
371a689f 2039
371a689f
AW
2040 del_gendisk(md->disk);
2041 }
371a689f
AW
2042 mmc_blk_put(md);
2043 }
2044}
2045
2046static void mmc_blk_remove_parts(struct mmc_card *card,
2047 struct mmc_blk_data *md)
2048{
2049 struct list_head *pos, *q;
2050 struct mmc_blk_data *part_md;
2051
2052 list_for_each_safe(pos, q, &md->part) {
2053 part_md = list_entry(pos, struct mmc_blk_data, part);
2054 list_del(pos);
2055 mmc_blk_remove_req(part_md);
2056 }
2057}
2058
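/*
 * Register the gendisk with the driver core and create the "force_ro"
 * sysfs attribute.  Boot areas whose read-only state can be locked until
 * the next power cycle additionally get "ro_lock_until_next_power_on".
 */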
2059static int mmc_add_disk(struct mmc_blk_data *md)
2060{
2061 int ret;
add710ea 2062 struct mmc_card *card = md->queue.card;
371a689f 2063
307d8e6f 2064 device_add_disk(md->parent, md->disk);
371a689f
AW
2065 md->force_ro.show = force_ro_show;
2066 md->force_ro.store = force_ro_store;
641c3187 2067 sysfs_attr_init(&md->force_ro.attr);
371a689f
AW
2068 md->force_ro.attr.name = "force_ro";
2069 md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
2070 ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
2071 if (ret)
add710ea
JR
2072 goto force_ro_fail;
2073
2074 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
2075 card->ext_csd.boot_ro_lockable) {
88187398 2076 umode_t mode;
add710ea
JR
2077
2078 if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
2079 mode = S_IRUGO;
2080 else
2081 mode = S_IRUGO | S_IWUSR;
2082
2083 md->power_ro_lock.show = power_ro_lock_show;
2084 md->power_ro_lock.store = power_ro_lock_store;
00d9ac08 2085 sysfs_attr_init(&md->power_ro_lock.attr);
add710ea
JR
2086 md->power_ro_lock.attr.mode = mode;
2087 md->power_ro_lock.attr.name =
2088 "ro_lock_until_next_power_on";
2089 ret = device_create_file(disk_to_dev(md->disk),
2090 &md->power_ro_lock);
2091 if (ret)
2092 goto power_ro_lock_fail;
2093 }
2094 return ret;
2095
2096power_ro_lock_fail:
2097 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
2098force_ro_fail:
2099 del_gendisk(md->disk);
371a689f
AW
2100
2101 return ret;
2102}
2103
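/*
 * Driver probe: allocate the main block device plus one block device per
 * enabled physical partition, register the disks and enable runtime PM
 * (runtime PM for SD-combo cards is left to the SDIO init sequence).
 */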
96541bac 2104static int mmc_blk_probe(struct mmc_card *card)
1da177e4 2105{
371a689f 2106 struct mmc_blk_data *md, *part_md;
a7bbb573
PO
2107 char cap_str[10];
2108
912490db
PO
2109 /*
2110 * Check that the card supports the command class(es) we need.
2111 */
2112 if (!(card->csd.cmdclass & CCC_BLOCK_READ))
1da177e4
LT
2113 return -ENODEV;
2114
8c7cdbf9 2115 mmc_fixup_device(card, mmc_blk_fixups);
5204d00f 2116
1da177e4
LT
2117 md = mmc_blk_alloc(card);
2118 if (IS_ERR(md))
2119 return PTR_ERR(md);
2120
b9f28d86 2121 string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
a7bbb573 2122 cap_str, sizeof(cap_str));
a3c76eb9 2123 pr_info("%s: %s %s %s %s\n",
1da177e4 2124 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
a7bbb573 2125 cap_str, md->read_only ? "(ro)" : "");
1da177e4 2126
371a689f
AW
2127 if (mmc_blk_alloc_parts(card, md))
2128 goto out;
2129
96541bac 2130 dev_set_drvdata(&card->dev, md);
6f60c222 2131
371a689f
AW
2132 if (mmc_add_disk(md))
2133 goto out;
2134
2135 list_for_each_entry(part_md, &md->part, part) {
2136 if (mmc_add_disk(part_md))
2137 goto out;
2138 }
e94cfef6
UH
2139
2140 pm_runtime_set_autosuspend_delay(&card->dev, 3000);
2141 pm_runtime_use_autosuspend(&card->dev);
2142
2143 /*
2144 * Don't enable runtime PM for SD-combo cards here. Leave that
2145 * decision to be taken during the SDIO init sequence instead.
2146 */
2147 if (card->type != MMC_TYPE_SD_COMBO) {
2148 pm_runtime_set_active(&card->dev);
2149 pm_runtime_enable(&card->dev);
2150 }
2151
1da177e4
LT
2152 return 0;
2153
2154 out:
371a689f
AW
2155 mmc_blk_remove_parts(card, md);
2156 mmc_blk_remove_req(md);
5865f287 2157 return 0;
1da177e4
LT
2158}
2159
96541bac 2160static void mmc_blk_remove(struct mmc_card *card)
1da177e4 2161{
96541bac 2162 struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
1da177e4 2163
371a689f 2164 mmc_blk_remove_parts(card, md);
e94cfef6 2165 pm_runtime_get_sync(&card->dev);
ddd6fa7e
AH
2166 mmc_claim_host(card->host);
2167 mmc_blk_part_switch(card, md);
2168 mmc_release_host(card->host);
e94cfef6
UH
2169 if (card->type != MMC_TYPE_SD_COMBO)
2170 pm_runtime_disable(&card->dev);
2171 pm_runtime_put_noidle(&card->dev);
371a689f 2172 mmc_blk_remove_req(md);
96541bac 2173 dev_set_drvdata(&card->dev, NULL);
1da177e4
LT
2174}
2175
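/*
 * Common suspend/shutdown helper: suspend the block queue of the main
 * device and of every partition device so no further requests reach the
 * card.
 */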
96541bac 2176static int _mmc_blk_suspend(struct mmc_card *card)
1da177e4 2177{
371a689f 2178 struct mmc_blk_data *part_md;
96541bac 2179 struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
1da177e4
LT
2180
2181 if (md) {
2182 mmc_queue_suspend(&md->queue);
371a689f
AW
2183 list_for_each_entry(part_md, &md->part, part) {
2184 mmc_queue_suspend(&part_md->queue);
2185 }
1da177e4
LT
2186 }
2187 return 0;
2188}
2189
96541bac 2190static void mmc_blk_shutdown(struct mmc_card *card)
76287748 2191{
96541bac 2192 _mmc_blk_suspend(card);
76287748
UH
2193}
2194
0967edc6
UH
2195#ifdef CONFIG_PM_SLEEP
2196static int mmc_blk_suspend(struct device *dev)
76287748 2197{
96541bac
UH
2198 struct mmc_card *card = mmc_dev_to_card(dev);
2199
2200 return _mmc_blk_suspend(card);
76287748
UH
2201}
2202
0967edc6 2203static int mmc_blk_resume(struct device *dev)
1da177e4 2204{
371a689f 2205 struct mmc_blk_data *part_md;
fc95e30b 2206 struct mmc_blk_data *md = dev_get_drvdata(dev);
1da177e4
LT
2207
2208 if (md) {
371a689f
AW
2209 /*
2210 * Resume involves the card going into idle state,
2211 * so the current partition is always the main one.
2212 */
2213 md->part_curr = md->part_type;
1da177e4 2214 mmc_queue_resume(&md->queue);
371a689f
AW
2215 list_for_each_entry(part_md, &md->part, part) {
2216 mmc_queue_resume(&part_md->queue);
2217 }
1da177e4
LT
2218 }
2219 return 0;
2220}
1da177e4
LT
2221#endif
2222
0967edc6
UH
2223static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume);
2224
96541bac
UH
2225static struct mmc_driver mmc_driver = {
2226 .drv = {
2227 .name = "mmcblk",
2228 .pm = &mmc_blk_pm_ops,
2229 },
1da177e4
LT
2230 .probe = mmc_blk_probe,
2231 .remove = mmc_blk_remove,
76287748 2232 .shutdown = mmc_blk_shutdown,
1da177e4
LT
2233};
2234
2235static int __init mmc_blk_init(void)
2236{
9d4e98e9 2237 int res;
1da177e4 2238
5e71b7a6
OJ
2239 if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
2240 pr_info("mmcblk: using %d minors per device\n", perdev_minors);
2241
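	/*
	 * Illustrative arithmetic, assuming the default CONFIG_MMC_BLOCK_MINORS
	 * of 8: (1 << MINORBITS) / 8 = 131072, so max_devices ends up capped
	 * by MAX_DEVICES at 256.
	 */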
a26eba61 2242 max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors);
5e71b7a6 2243
fe6b4c88
PO
2244 res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
2245 if (res)
1da177e4 2246 goto out;
1da177e4 2247
9d4e98e9
AM
2248 res = mmc_register_driver(&mmc_driver);
2249 if (res)
2250 goto out2;
1da177e4 2251
9d4e98e9
AM
2252 return 0;
2253 out2:
2254 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
1da177e4
LT
2255 out:
2256 return res;
2257}
2258
2259static void __exit mmc_blk_exit(void)
2260{
2261 mmc_unregister_driver(&mmc_driver);
fe6b4c88 2262 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
1da177e4
LT
2263}
2264
2265module_init(mmc_blk_init);
2266module_exit(mmc_blk_exit);
2267
2268MODULE_LICENSE("GPL");
2269MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
2270