/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 *          28 May 2002
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/pm_runtime.h>
#include <linux/idr.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/uaccess.h>

#include "queue.h"
#include "block.h"

MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."

#define INAND_CMD38_ARG_EXT_CSD  113
#define INAND_CMD38_ARG_ERASE    0x00
#define INAND_CMD38_ARG_TRIM     0x01
#define INAND_CMD38_ARG_SECERASE 0x80
#define INAND_CMD38_ARG_SECTRIM1 0x81
#define INAND_CMD38_ARG_SECTRIM2 0x88
#define MMC_BLK_TIMEOUT_MS  (10 * 60 * 1000)        /* 10 minute timeout */
#define MMC_SANITIZE_REQ_TIMEOUT 240000
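/*
 * The argument of a MMC_SWITCH (CMD6) command carries the EXT_CSD byte
 * index in bits [23:16]; this macro recovers that index so the ioctl
 * path can recognize, e.g., a SANITIZE_START request.
 */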
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)

#define mmc_req_rel_wr(req)	((req->cmd_flags & REQ_FUA) && \
				 (rq_data_dir(req) == WRITE))
#define PACKED_CMD_VER	0x01
#define PACKED_CMD_WR	0x02

static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so number of mmcblk devices is
 * limited to (1 << 20) / number of minors per device.  It is also
 * limited by the MAX_DEVICES below.
 */
static int max_devices;

#define MAX_DEVICES 256

static DEFINE_IDA(mmc_blk_ida);
static DEFINE_SPINLOCK(mmc_blk_lock);

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	spinlock_t	lock;
	struct device	*parent;
	struct gendisk	*disk;
	struct mmc_queue queue;
	struct list_head part;

	unsigned int	flags;
#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */
#define MMC_BLK_PACKED_CMD	(1 << 2)	/* MMC packed command support */

	unsigned int	usage;
	unsigned int	read_only;
	unsigned int	part_type;
	unsigned int	reset_done;
#define MMC_BLK_READ		BIT(0)
#define MMC_BLK_WRITE		BIT(1)
#define MMC_BLK_DISCARD		BIT(2)
#define MMC_BLK_SECDISCARD	BIT(3)

	/*
	 * Only set in the main mmc_blk_data associated with the
	 * mmc_card via dev_set_drvdata; it keeps track of the
	 * currently selected device partition.
	 */
	unsigned int	part_curr;
	struct device_attribute force_ro;
	struct device_attribute power_ro_lock;
	int	area_type;
};

static DEFINE_MUTEX(open_lock);

enum {
	MMC_PACKED_NR_IDX = -1,
	MMC_PACKED_NR_ZERO,
	MMC_PACKED_NR_SINGLE,
};

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minor numbers to allocate per device");

static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md);
static int get_card_status(struct mmc_card *card, u32 *status, int retries);

static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
{
	struct mmc_packed *packed = mqrq->packed;

	mqrq->cmd_type = MMC_PACKED_NONE;
	packed->nr_entries = MMC_PACKED_NR_ZERO;
	packed->idx_failure = MMC_PACKED_NR_IDX;
	packed->retries = 0;
	packed->blocks = 0;
}

static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
	struct mmc_blk_data *md;

	mutex_lock(&open_lock);
	md = disk->private_data;
	if (md && md->usage == 0)
		md = NULL;
	if (md)
		md->usage++;
	mutex_unlock(&open_lock);

	return md;
}

static inline int mmc_get_devidx(struct gendisk *disk)
{
	int devidx = disk->first_minor / perdev_minors;
	return devidx;
}

static void mmc_blk_put(struct mmc_blk_data *md)
{
	mutex_lock(&open_lock);
	md->usage--;
	if (md->usage == 0) {
		int devidx = mmc_get_devidx(md->disk);
		blk_cleanup_queue(md->queue.queue);

		spin_lock(&mmc_blk_lock);
		ida_remove(&mmc_blk_ida, devidx);
		spin_unlock(&mmc_blk_lock);

		put_disk(md->disk);
		kfree(md);
	}
	mutex_unlock(&open_lock);
}

static ssize_t power_ro_lock_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	struct mmc_card *card = md->queue.card;
	int locked = 0;

	if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
		locked = 2;
	else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
		locked = 1;

	ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);

	mmc_blk_put(md);

	return ret;
}

static ssize_t power_ro_lock_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	int ret;
	struct mmc_blk_data *md, *part_md;
	struct mmc_card *card;
	unsigned long set;

	if (kstrtoul(buf, 0, &set))
		return -EINVAL;

	if (set != 1)
		return count;

	md = mmc_blk_get(dev_to_disk(dev));
	card = md->queue.card;

	mmc_get_card(card);

	ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
				card->ext_csd.boot_ro_lock |
				EXT_CSD_BOOT_WP_B_PWR_WP_EN,
				card->ext_csd.part_time);
	if (ret)
		pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
	else
		card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;

	mmc_put_card(card);

	if (!ret) {
		pr_info("%s: Locking boot partition ro until next power on\n",
			md->disk->disk_name);
		set_disk_ro(md->disk, 1);

		list_for_each_entry(part_md, &md->part, part)
			if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
				pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
				set_disk_ro(part_md->disk, 1);
			}
	}

	mmc_blk_put(md);
	return count;
}

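/*
 * force_ro reports only the software-forced part of the read-only
 * state: XOR-ing the current disk RO flag with the card's physical
 * write-protect bit masks out the hardware switch.
 */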
static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

	ret = snprintf(buf, PAGE_SIZE, "%d\n",
		       get_disk_ro(dev_to_disk(dev)) ^
		       md->read_only);
	mmc_blk_put(md);
	return ret;
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	int ret;
	char *end;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	unsigned long set = simple_strtoul(buf, &end, 0);
	if (end == buf) {
		ret = -EINVAL;
		goto out;
	}

	set_disk_ro(dev_to_disk(dev), set || md->read_only);
	ret = count;
out:
	mmc_blk_put(md);
	return ret;
}

static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
	int ret = -ENXIO;

	mutex_lock(&block_mutex);
	if (md) {
		if (md->usage == 2)
			check_disk_change(bdev);
		ret = 0;

		if ((mode & FMODE_WRITE) && md->read_only) {
			mmc_blk_put(md);
			ret = -EROFS;
		}
	}
	mutex_unlock(&block_mutex);

	return ret;
}

static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
	struct mmc_blk_data *md = disk->private_data;

	mutex_lock(&block_mutex);
	mmc_blk_put(md);
	mutex_unlock(&block_mutex);
}

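/*
 * Flash cards have no real CHS geometry; fabricate one (4 heads,
 * 16 sectors) so legacy partitioning tools that insist on a geometry
 * keep working, and derive the cylinder count from the capacity.
 */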
static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
}

struct mmc_blk_ioc_data {
	struct mmc_ioc_cmd ic;
	unsigned char *buf;
	u64 buf_bytes;
};

static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
	struct mmc_ioc_cmd __user *user)
{
	struct mmc_blk_ioc_data *idata;
	int err;

	idata = kmalloc(sizeof(*idata), GFP_KERNEL);
	if (!idata) {
		err = -ENOMEM;
		goto out;
	}

	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
		err = -EFAULT;
		goto idata_err;
	}

	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
		err = -EOVERFLOW;
		goto idata_err;
	}

	if (!idata->buf_bytes) {
		idata->buf = NULL;
		return idata;
	}

	idata->buf = kmalloc(idata->buf_bytes, GFP_KERNEL);
	if (!idata->buf) {
		err = -ENOMEM;
		goto idata_err;
	}

	if (copy_from_user(idata->buf, (void __user *)(unsigned long)
					idata->ic.data_ptr, idata->buf_bytes)) {
		err = -EFAULT;
		goto copy_err;
	}

	return idata;

copy_err:
	kfree(idata->buf);
idata_err:
	kfree(idata);
out:
	return ERR_PTR(err);
}

static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
				      struct mmc_blk_ioc_data *idata)
{
	struct mmc_ioc_cmd *ic = &idata->ic;

	if (copy_to_user(&(ic_ptr->response), ic->response,
			 sizeof(ic->response)))
		return -EFAULT;

	if (!idata->ic.write_flag) {
		if (copy_to_user((void __user *)(unsigned long)ic->data_ptr,
				 idata->buf, idata->buf_bytes))
			return -EFAULT;
	}

	return 0;
}

static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
				       u32 retries_max)
{
	int err;
	u32 retry_count = 0;

	if (!status || !retries_max)
		return -EINVAL;

	do {
		err = get_card_status(card, status, 5);
		if (err)
			break;

		if (!R1_STATUS(*status) &&
		    (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
			break; /* RPMB programming operation complete */

		/*
		 * Reschedule to give the MMC device a chance to continue
		 * processing the previous command without being polled too
		 * frequently.
		 */
		usleep_range(1000, 5000);
	} while (++retry_count < retries_max);

	if (retry_count == retries_max)
		err = -EPERM;

	return err;
}

static int ioctl_do_sanitize(struct mmc_card *card)
{
	int err;

	if (!mmc_can_sanitize(card)) {
		pr_warn("%s: %s - SANITIZE is not supported\n",
			mmc_hostname(card->host), __func__);
		err = -EOPNOTSUPP;
		goto out;
	}

	pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
		mmc_hostname(card->host), __func__);

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					EXT_CSD_SANITIZE_START, 1,
					MMC_SANITIZE_REQ_TIMEOUT);

	if (err)
		pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
		       mmc_hostname(card->host), __func__, err);

	pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
					     __func__);
out:
	return err;
}

static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
			       struct mmc_blk_ioc_data *idata)
{
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct mmc_request mrq = {NULL};
	struct scatterlist sg;
	int err;
	int is_rpmb = false;
	u32 status = 0;

	if (!card || !md || !idata)
		return -EINVAL;

	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
		is_rpmb = true;

	cmd.opcode = idata->ic.opcode;
	cmd.arg = idata->ic.arg;
	cmd.flags = idata->ic.flags;

	if (idata->buf_bytes) {
		data.sg = &sg;
		data.sg_len = 1;
		data.blksz = idata->ic.blksz;
		data.blocks = idata->ic.blocks;

		sg_init_one(data.sg, idata->buf, idata->buf_bytes);

		if (idata->ic.write_flag)
			data.flags = MMC_DATA_WRITE;
		else
			data.flags = MMC_DATA_READ;

		/* data.flags must already be set before doing this. */
		mmc_set_data_timeout(&data, card);

		/* Allow overriding the timeout_ns for empirical tuning. */
		if (idata->ic.data_timeout_ns)
			data.timeout_ns = idata->ic.data_timeout_ns;

		if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
			/*
			 * Pretend this is a data transfer and rely on the
			 * host driver to compute timeout.  When all host
			 * drivers support cmd.cmd_timeout for R1B, this
			 * can be changed to:
			 *
			 *     mrq.data = NULL;
			 *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
			 */
			data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
		}

		mrq.data = &data;
	}

	mrq.cmd = &cmd;

	err = mmc_blk_part_switch(card, md);
	if (err)
		return err;

	if (idata->ic.is_acmd) {
		err = mmc_app_cmd(card->host, card);
		if (err)
			return err;
	}

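	/*
	 * RPMB accesses need CMD23 with the block count up front; bit 31
	 * of the user-supplied write_flag additionally requests a
	 * reliable write (see the mmc_ioc_cmd ABI in <linux/mmc/ioctl.h>).
	 */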
	if (is_rpmb) {
		err = mmc_set_blockcount(card, data.blocks,
			idata->ic.write_flag & (1 << 31));
		if (err)
			return err;
	}

	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
	    (cmd.opcode == MMC_SWITCH)) {
		err = ioctl_do_sanitize(card);

		if (err)
			pr_err("%s: ioctl_do_sanitize() failed. err = %d",
			       __func__, err);

		return err;
	}

	mmc_wait_for_req(card->host, &mrq);

	if (cmd.error) {
		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
						__func__, cmd.error);
		return cmd.error;
	}
	if (data.error) {
		dev_err(mmc_dev(card->host), "%s: data error %d\n",
						__func__, data.error);
		return data.error;
	}

	/*
	 * According to the SD specs, some commands require a delay after
	 * issuing the command.
	 */
	if (idata->ic.postsleep_min_us)
		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);

	memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));

	if (is_rpmb) {
		/*
		 * Ensure RPMB command has completed by polling CMD13
		 * "Send Status".
		 */
		err = ioctl_rpmb_card_status_poll(card, &status, 5);
		if (err)
			dev_err(mmc_dev(card->host),
					"%s: Card Status=0x%08X, error %d\n",
					__func__, status, err);
	}

	return err;
}

static int mmc_blk_ioctl_cmd(struct block_device *bdev,
			     struct mmc_ioc_cmd __user *ic_ptr)
{
	struct mmc_blk_ioc_data *idata;
	struct mmc_blk_data *md;
	struct mmc_card *card;
	int err = 0, ioc_err = 0;

	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition.  This prevents overspray
	 * between sibling partitions.
	 */
	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
		return -EPERM;

	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
	if (IS_ERR(idata))
		return PTR_ERR(idata);

	md = mmc_blk_get(bdev->bd_disk);
	if (!md) {
		err = -EINVAL;
		goto cmd_err;
	}

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}

	mmc_get_card(card);

	ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);

	/* Always switch back to main area after RPMB access */
	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
		mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));

	mmc_put_card(card);

	err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);

cmd_done:
	mmc_blk_put(md);
cmd_err:
	kfree(idata->buf);
	kfree(idata);
	return ioc_err ? ioc_err : err;
}

static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
				   struct mmc_ioc_multi_cmd __user *user)
{
	struct mmc_blk_ioc_data **idata = NULL;
	struct mmc_ioc_cmd __user *cmds = user->cmds;
	struct mmc_card *card;
	struct mmc_blk_data *md;
	int i, err = 0, ioc_err = 0;
	__u64 num_of_cmds;

	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition.  This prevents overspray
	 * between sibling partitions.
	 */
	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
		return -EPERM;

	if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
			   sizeof(num_of_cmds)))
		return -EFAULT;

	if (num_of_cmds > MMC_IOC_MAX_CMDS)
		return -EINVAL;

	idata = kcalloc(num_of_cmds, sizeof(*idata), GFP_KERNEL);
	if (!idata)
		return -ENOMEM;

	for (i = 0; i < num_of_cmds; i++) {
		idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]);
		if (IS_ERR(idata[i])) {
			err = PTR_ERR(idata[i]);
			num_of_cmds = i;
			goto cmd_err;
		}
	}

	md = mmc_blk_get(bdev->bd_disk);
	if (!md) {
		err = -EINVAL;
		goto cmd_err;
	}

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}

	mmc_get_card(card);

	for (i = 0; i < num_of_cmds && !ioc_err; i++)
		ioc_err = __mmc_blk_ioctl_cmd(card, md, idata[i]);

	/* Always switch back to main area after RPMB access */
	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
		mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));

	mmc_put_card(card);

	/* copy to user if data and response */
	for (i = 0; i < num_of_cmds && !err; i++)
		err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);

cmd_done:
	mmc_blk_put(md);
cmd_err:
	for (i = 0; i < num_of_cmds; i++) {
		kfree(idata[i]->buf);
		kfree(idata[i]);
	}
	kfree(idata);
	return ioc_err ? ioc_err : err;
}

static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case MMC_IOC_CMD:
		return mmc_blk_ioctl_cmd(bdev,
				(struct mmc_ioc_cmd __user *)arg);
	case MMC_IOC_MULTI_CMD:
		return mmc_blk_ioctl_multi_cmd(bdev,
				(struct mmc_ioc_multi_cmd __user *)arg);
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
#endif

static const struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
	.ioctl			= mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= mmc_blk_compat_ioctl,
#endif
};

static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md)
{
	int ret;
	struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);

	if (main_md->part_curr == md->part_type)
		return 0;

	if (mmc_card_mmc(card)) {
		u8 part_config = card->ext_csd.part_config;

		if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
			mmc_retune_pause(card->host);

		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		part_config |= md->part_type;

		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONFIG, part_config,
				 card->ext_csd.part_time);
		if (ret) {
			if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
				mmc_retune_unpause(card->host);
			return ret;
		}

		card->ext_csd.part_config = part_config;

		if (main_md->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB)
			mmc_retune_unpause(card->host);
	}

	main_md->part_curr = md->part_type;
	return 0;
}

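/*
 * Ask an SD card how much of the preceding write succeeded: ACMD22
 * (SEND_NUM_WR_BLKS) returns the number of well-written blocks as a
 * single big-endian 32-bit word read back over the data lines.
 */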
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
	int err;
	u32 result;
	__be32 *blocks;

	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};

	struct scatterlist sg;

	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		return (u32)-1;
	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
		return (u32)-1;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);

	mrq.cmd = &cmd;
	mrq.data = &data;

	blocks = kmalloc(4, GFP_KERNEL);
	if (!blocks)
		return (u32)-1;

	sg_init_one(&sg, blocks, 4);

	mmc_wait_for_req(card->host, &mrq);

	result = ntohl(*blocks);
	kfree(blocks);

	if (cmd.error || data.error)
		result = (u32)-1;

	return result;
}

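/* Issue CMD13 (SEND_STATUS) and hand back the R1 status word. */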
static int get_card_status(struct mmc_card *card, u32 *status, int retries)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err == 0)
		*status = cmd.resp[0];
	return err;
}

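/*
 * Poll CMD13 until the card reports READY_FOR_DATA and has left the
 * programming state, or the timeout expires.  An R1_ERROR seen in any
 * intermediate status is latched into *gen_err for the caller.
 */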
static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
		bool hw_busy_detect, struct request *req, bool *gen_err)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
	int err = 0;
	u32 status;

	do {
		err = get_card_status(card, &status, 5);
		if (err) {
			pr_err("%s: error %d requesting status\n",
			       req->rq_disk->disk_name, err);
			return err;
		}

		if (status & R1_ERROR) {
			pr_err("%s: %s: error sending status cmd, status %#x\n",
				req->rq_disk->disk_name, __func__, status);
			*gen_err = true;
		}

		/* We may rely on the host hw to handle busy detection. */
		if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) &&
			hw_busy_detect)
			break;

		/*
		 * Timeout if the device never becomes ready for data and
		 * never leaves the program state.
		 */
		if (time_after(jiffies, timeout)) {
			pr_err("%s: Card stuck in programming state! %s %s\n",
				mmc_hostname(card->host),
				req->rq_disk->disk_name, __func__);
			return -ETIMEDOUT;
		}

		/*
		 * Some cards mishandle the status bits,
		 * so make sure to check both the busy
		 * indication and the card state.
		 */
	} while (!(status & R1_READY_FOR_DATA) ||
		 (R1_CURRENT_STATE(status) == R1_STATE_PRG));

	return err;
}

static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
		struct request *req, bool *gen_err, u32 *stop_status)
{
	struct mmc_host *host = card->host;
	struct mmc_command cmd = {0};
	int err;
	bool use_r1b_resp = rq_data_dir(req) == WRITE;

	/*
	 * Normally we use R1B responses for WRITE, but in cases where the host
	 * has specified a max_busy_timeout we need to validate it. A failure
	 * means we need to prevent the host from doing hw busy detection, which
	 * is done by converting to a R1 response instead.
	 */
	if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	if (use_r1b_resp) {
		cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	}

	err = mmc_wait_for_cmd(host, &cmd, 5);
	if (err)
		return err;

	*stop_status = cmd.resp[0];

	/* No need to check card status in case of READ. */
	if (rq_data_dir(req) == READ)
		return 0;

	if (!mmc_host_is_spi(host) &&
	    (*stop_status & R1_ERROR)) {
		pr_err("%s: %s: general error sending stop command, resp %#x\n",
			req->rq_disk->disk_name, __func__, *stop_status);
		*gen_err = true;
	}

	return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
}

#define ERR_NOMEDIUM	3
#define ERR_RETRY	2
#define ERR_ABORT	1
#define ERR_CONTINUE	0

static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
	bool status_valid, u32 status)
{
	switch (error) {
	case -EILSEQ:
		/* response crc error, retry the r/w cmd */
		pr_err("%s: %s sending %s command, card status %#x\n",
			req->rq_disk->disk_name, "response CRC error",
			name, status);
		return ERR_RETRY;

	case -ETIMEDOUT:
		pr_err("%s: %s sending %s command, card status %#x\n",
			req->rq_disk->disk_name, "timed out", name, status);

		/* If the status cmd initially failed, retry the r/w cmd */
		if (!status_valid) {
			pr_err("%s: status not valid, retrying timeout\n",
				req->rq_disk->disk_name);
			return ERR_RETRY;
		}

		/*
		 * If it was a r/w cmd crc error, or illegal command
		 * (eg, issued in wrong state) then retry - we should
		 * have corrected the state problem above.
		 */
		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
			pr_err("%s: command error, retrying timeout\n",
				req->rq_disk->disk_name);
			return ERR_RETRY;
		}

		/* Otherwise abort the command */
		return ERR_ABORT;

	default:
		/* We don't understand the error code the driver gave us */
		pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
		       req->rq_disk->disk_name, error, status);
		return ERR_ABORT;
	}
}

/*
 * Initial r/w and stop cmd error recovery.
 * We don't know whether the card received the r/w cmd or not, so try to
 * restore things back to a sane state.  Essentially, we do this as follows:
 * - Obtain card status.  If the first attempt to obtain card status fails,
 *   the status word will reflect the failed status cmd, not the failed
 *   r/w cmd.  If we fail to obtain card status, it suggests we can no
 *   longer communicate with the card.
 * - Check the card state.  If the card received the cmd but there was a
 *   transient problem with the response, it might still be in a data transfer
 *   mode.  Try to send it a stop command.  If this fails, we can't recover.
 * - If the r/w cmd failed due to a response CRC error, it was probably
 *   transient, so retry the cmd.
 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
 *   illegal cmd, retry.
 * Otherwise we don't understand what happened, so abort.
 */
static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
	struct mmc_blk_request *brq, bool *ecc_err, bool *gen_err)
{
	bool prev_cmd_status_valid = true;
	u32 status, stop_status = 0;
	int err, retry;

	if (mmc_card_removed(card))
		return ERR_NOMEDIUM;

	/*
	 * Try to get card status which indicates both the card state
	 * and why there was no response.  If the first attempt fails,
	 * we can't be sure the returned status is for the r/w command.
	 */
	for (retry = 2; retry >= 0; retry--) {
		err = get_card_status(card, &status, 0);
		if (!err)
			break;

		/* Re-tune if needed */
		mmc_retune_recheck(card->host);

		prev_cmd_status_valid = false;
		pr_err("%s: error %d sending status command, %sing\n",
		       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
	}

	/* We couldn't get a response from the card.  Give up. */
	if (err) {
		/* Check if the card is removed */
		if (mmc_detect_card_removed(card->host))
			return ERR_NOMEDIUM;
		return ERR_ABORT;
	}

	/* Flag ECC errors */
	if ((status & R1_CARD_ECC_FAILED) ||
	    (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
	    (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
		*ecc_err = true;

	/* Flag General errors */
	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
		if ((status & R1_ERROR) ||
		    (brq->stop.resp[0] & R1_ERROR)) {
			pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
			       req->rq_disk->disk_name, __func__,
			       brq->stop.resp[0], status);
			*gen_err = true;
		}

	/*
	 * Check the current card state.  If it is in some data transfer
	 * mode, tell it to stop (and hopefully transition back to TRAN.)
	 */
	if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
	    R1_CURRENT_STATE(status) == R1_STATE_RCV) {
		err = send_stop(card,
			DIV_ROUND_UP(brq->data.timeout_ns, 1000000),
			req, gen_err, &stop_status);
		if (err) {
			pr_err("%s: error %d sending stop command\n",
			       req->rq_disk->disk_name, err);
			/*
			 * If the stop cmd also timed out, the card is probably
			 * not present, so abort. Other errors are bad news too.
			 */
			return ERR_ABORT;
		}

		if (stop_status & R1_CARD_ECC_FAILED)
			*ecc_err = true;
	}

	/* Check for set block count errors */
	if (brq->sbc.error)
		return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
				prev_cmd_status_valid, status);

	/* Check for r/w command errors */
	if (brq->cmd.error)
		return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
				prev_cmd_status_valid, status);

	/* Data errors */
	if (!brq->stop.error)
		return ERR_CONTINUE;

	/* Now for stop errors.  These aren't fatal to the transfer. */
	pr_info("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
		req->rq_disk->disk_name, brq->stop.error,
		brq->cmd.resp[0], status);

	/*
	 * Substitute in our own stop status as this will give the error
	 * state which happened during the execution of the r/w command.
	 */
	if (stop_status) {
		brq->stop.resp[0] = stop_status;
		brq->stop.error = 0;
	}
	return ERR_CONTINUE;
}

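/*
 * Reset the card once per request type: the md->reset_done bit makes a
 * repeated reset for the same type of request fail with -EEXIST, so a
 * reset that did not help is not retried forever.
 */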
static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
			 int type)
{
	int err;

	if (md->reset_done & type)
		return -EEXIST;

	md->reset_done |= type;
	err = mmc_hw_reset(host);
	/* Ensure we switch back to the correct partition */
	if (err != -EOPNOTSUPP) {
		struct mmc_blk_data *main_md =
			dev_get_drvdata(&host->card->dev);
		int part_err;

		main_md->part_curr = main_md->part_type;
		part_err = mmc_blk_part_switch(host->card, md);
		if (part_err) {
			/*
			 * We have failed to get back into the correct
			 * partition, so we need to abort the whole request.
			 */
			return -ENODEV;
		}
	}
	return err;
}

static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
{
	md->reset_done &= ~type;
}

int mmc_access_rpmb(struct mmc_queue *mq)
{
	struct mmc_blk_data *md = mq->blkdata;
	/*
	 * If this is an RPMB partition access, return true
	 */
	if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
		return true;

	return false;
}

static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0, type = MMC_BLK_DISCARD;

	if (!mmc_can_erase(card)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

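	/* Prefer DISCARD, then TRIM; fall back to an erase-group ERASE. */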
	if (mmc_can_discard(card))
		arg = MMC_DISCARD_ARG;
	else if (mmc_can_trim(card))
		arg = MMC_TRIM_ARG;
	else
		arg = MMC_ERASE_ARG;
retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_TRIM_ARG ?
				 INAND_CMD38_ARG_TRIM :
				 INAND_CMD38_ARG_ERASE,
				 0);
		if (err)
			goto out;
	}
	err = mmc_erase(card, from, nr, arg);
out:
	if (err == -EIO && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
	blk_end_request(req, err, blk_rq_bytes(req));

	return err ? 0 : 1;
}

static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
				       struct request *req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0, type = MMC_BLK_SECDISCARD;

	if (!(mmc_can_secure_erase_trim(card))) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
		arg = MMC_SECURE_TRIM1_ARG;
	else
		arg = MMC_SECURE_ERASE_ARG;

retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_SECURE_TRIM1_ARG ?
				 INAND_CMD38_ARG_SECTRIM1 :
				 INAND_CMD38_ARG_SECERASE,
				 0);
		if (err)
			goto out_retry;
	}

	err = mmc_erase(card, from, nr, arg);
	if (err == -EIO)
		goto out_retry;
	if (err)
		goto out;

	if (arg == MMC_SECURE_TRIM1_ARG) {
		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 INAND_CMD38_ARG_EXT_CSD,
					 INAND_CMD38_ARG_SECTRIM2,
					 0);
			if (err)
				goto out_retry;
		}

		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
		if (err == -EIO)
			goto out_retry;
		if (err)
			goto out;
	}

out_retry:
	if (err && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
out:
	blk_end_request(req, err, blk_rq_bytes(req));

	return err ? 0 : 1;
}

static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	int ret = 0;

	ret = mmc_flush_cache(card);
	if (ret)
		ret = -EIO;

	blk_end_request_all(req, ret);

	return ret ? 0 : 1;
}

/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
				    struct mmc_card *card,
				    struct request *req)
{
	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
		/* Legacy mode imposes restrictions on transfers. */
		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
			brq->data.blocks = 1;

		if (brq->data.blocks > card->ext_csd.rel_sectors)
			brq->data.blocks = card->ext_csd.rel_sectors;
		else if (brq->data.blocks < card->ext_csd.rel_sectors)
			brq->data.blocks = 1;
	}
}

#define CMD_ERRORS							\
	(R1_OUT_OF_RANGE |	/* Command argument out of range */	\
	 R1_ADDRESS_ERROR |	/* Misaligned address */		\
	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
	 R1_CC_ERROR |		/* Card controller error */		\
	 R1_ERROR)		/* General/unknown error */

static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
					     struct mmc_async_req *areq)
{
	struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
						    mmc_active);
	struct mmc_blk_request *brq = &mq_mrq->brq;
	struct request *req = mq_mrq->req;
	int need_retune = card->host->need_retune;
	bool ecc_err = false;
	bool gen_err = false;

	/*
	 * sbc.error indicates a problem with the set block count
	 * command.  No data will have been transferred.
	 *
	 * cmd.error indicates a problem with the r/w command.  No
	 * data will have been transferred.
	 *
	 * stop.error indicates a problem with the stop command.  Data
	 * may have been transferred, or may still be transferring.
	 */
	if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
	    brq->data.error) {
		switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
		case ERR_RETRY:
			return MMC_BLK_RETRY;
		case ERR_ABORT:
			return MMC_BLK_ABORT;
		case ERR_NOMEDIUM:
			return MMC_BLK_NOMEDIUM;
		case ERR_CONTINUE:
			break;
		}
	}

	/*
	 * Check for errors relating to the execution of the
	 * initial command - such as address errors.  No data
	 * has been transferred.
	 */
	if (brq->cmd.resp[0] & CMD_ERRORS) {
		pr_err("%s: r/w command failed, status = %#x\n",
		       req->rq_disk->disk_name, brq->cmd.resp[0]);
		return MMC_BLK_ABORT;
	}

	/*
	 * Everything else is either success, or a data error of some
	 * kind.  If it was a write, we may have transitioned to
	 * program mode, and we have to wait for that to complete.
	 */
	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
		int err;

		/* Check stop command response */
		if (brq->stop.resp[0] & R1_ERROR) {
			pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
			       req->rq_disk->disk_name, __func__,
			       brq->stop.resp[0]);
			gen_err = true;
		}

		err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
					&gen_err);
		if (err)
			return MMC_BLK_CMD_ERR;
	}

	/* if general error occurs, retry the write operation. */
	if (gen_err) {
		pr_warn("%s: retrying write for general error\n",
			req->rq_disk->disk_name);
		return MMC_BLK_RETRY;
	}

	if (brq->data.error) {
		if (need_retune && !brq->retune_retry_done) {
			pr_debug("%s: retrying because a re-tune was needed\n",
				 req->rq_disk->disk_name);
			brq->retune_retry_done = 1;
			return MMC_BLK_RETRY;
		}
		pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
		       req->rq_disk->disk_name, brq->data.error,
		       (unsigned)blk_rq_pos(req),
		       (unsigned)blk_rq_sectors(req),
		       brq->cmd.resp[0], brq->stop.resp[0]);

		if (rq_data_dir(req) == READ) {
			if (ecc_err)
				return MMC_BLK_ECC_ERR;
			return MMC_BLK_DATA_ERR;
		} else {
			return MMC_BLK_CMD_ERR;
		}
	}

	if (!brq->data.bytes_xfered)
		return MMC_BLK_RETRY;

	if (mmc_packed_cmd(mq_mrq->cmd_type)) {
		if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
			return MMC_BLK_PARTIAL;
		else
			return MMC_BLK_SUCCESS;
	}

	if (blk_rq_bytes(req) != brq->data.bytes_xfered)
		return MMC_BLK_PARTIAL;

	return MMC_BLK_SUCCESS;
}

static int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
	int ret = 0;

	mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
	if (!mqrq_cur->packed) {
		pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
			mmc_card_name(card));
		ret = -ENOMEM;
		goto out;
	}

	mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
	if (!mqrq_prev->packed) {
		pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n",
			mmc_card_name(card));
		kfree(mqrq_cur->packed);
		mqrq_cur->packed = NULL;
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&mqrq_cur->packed->list);
	INIT_LIST_HEAD(&mqrq_prev->packed->list);

out:
	return ret;
}

static void mmc_packed_clean(struct mmc_queue *mq)
{
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	kfree(mqrq_cur->packed);
	mqrq_cur->packed = NULL;
	kfree(mqrq_prev->packed);
	mqrq_prev->packed = NULL;
}

static enum mmc_blk_status mmc_blk_packed_err_check(struct mmc_card *card,
						    struct mmc_async_req *areq)
{
	struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
						   mmc_active);
	struct request *req = mq_rq->req;
	struct mmc_packed *packed = mq_rq->packed;
	enum mmc_blk_status check;
	u32 status;
	int err;
	u8 *ext_csd;

	packed->retries--;
	check = mmc_blk_err_check(card, areq);
	err = get_card_status(card, &status, 0);
	if (err) {
		pr_err("%s: error %d sending status command\n",
		       req->rq_disk->disk_name, err);
		return MMC_BLK_ABORT;
	}

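	/*
	 * If the card raised an exception event, read EXT_CSD: a packed
	 * generic failure with an indexed error identifies the entry that
	 * failed, so the retry can resume from that entry.
	 */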
	if (status & R1_EXCEPTION_EVENT) {
		err = mmc_get_ext_csd(card, &ext_csd);
		if (err) {
			pr_err("%s: error %d sending ext_csd\n",
			       req->rq_disk->disk_name, err);
			return MMC_BLK_ABORT;
		}

		if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
		     EXT_CSD_PACKED_FAILURE) &&
		    (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
		     EXT_CSD_PACKED_GENERIC_ERROR)) {
			if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
			    EXT_CSD_PACKED_INDEXED_ERROR) {
				packed->idx_failure =
				  ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
				check = MMC_BLK_PARTIAL;
			}
			pr_err("%s: packed cmd failed, nr %u, sectors %u, "
			       "failure index: %d\n",
			       req->rq_disk->disk_name, packed->nr_entries,
			       packed->blocks, packed->idx_failure);
		}
		kfree(ext_csd);
	}

	return check;
}

static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
			       struct mmc_card *card,
			       int disable_multi,
			       struct mmc_queue *mq)
{
	u32 readcmd, writecmd;
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mqrq->req;
	struct mmc_blk_data *md = mq->blkdata;
	bool do_data_tag;

	/*
	 * Reliable writes are used to implement Forced Unit Access and
	 * are supported only on MMCs.
	 */
	bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
		(rq_data_dir(req) == WRITE) &&
		(md->flags & MMC_BLK_REL_WR);

	memset(brq, 0, sizeof(struct mmc_blk_request));
	brq->mrq.cmd = &brq->cmd;
	brq->mrq.data = &brq->data;

	brq->cmd.arg = blk_rq_pos(req);
	if (!mmc_card_blockaddr(card))
		brq->cmd.arg <<= 9;
	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
	brq->data.blksz = 512;
	brq->stop.opcode = MMC_STOP_TRANSMISSION;
	brq->stop.arg = 0;
	brq->data.blocks = blk_rq_sectors(req);

	/*
	 * The block layer doesn't support all sector count
	 * restrictions, so we need to be prepared for too big
	 * requests.
	 */
	if (brq->data.blocks > card->host->max_blk_count)
		brq->data.blocks = card->host->max_blk_count;

	if (brq->data.blocks > 1) {
		/*
		 * After a read error, we redo the request one sector
		 * at a time in order to accurately determine which
		 * sectors can be read successfully.
		 */
		if (disable_multi)
			brq->data.blocks = 1;

		/*
		 * Some controllers have HW issues while operating
		 * in multiple I/O mode
		 */
		if (card->host->ops->multi_io_quirk)
			brq->data.blocks = card->host->ops->multi_io_quirk(card,
						(rq_data_dir(req) == READ) ?
						MMC_DATA_READ : MMC_DATA_WRITE,
						brq->data.blocks);
	}

	if (brq->data.blocks > 1 || do_rel_wr) {
		/* SPI multiblock writes terminate using a special
		 * token, not a STOP_TRANSMISSION request.
		 */
		if (!mmc_host_is_spi(card->host) ||
		    rq_data_dir(req) == READ)
			brq->mrq.stop = &brq->stop;
		readcmd = MMC_READ_MULTIPLE_BLOCK;
		writecmd = MMC_WRITE_MULTIPLE_BLOCK;
	} else {
		brq->mrq.stop = NULL;
		readcmd = MMC_READ_SINGLE_BLOCK;
		writecmd = MMC_WRITE_BLOCK;
	}
	if (rq_data_dir(req) == READ) {
		brq->cmd.opcode = readcmd;
		brq->data.flags = MMC_DATA_READ;
		if (brq->mrq.stop)
			brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 |
					MMC_CMD_AC;
	} else {
		brq->cmd.opcode = writecmd;
		brq->data.flags = MMC_DATA_WRITE;
		if (brq->mrq.stop)
			brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B |
					MMC_CMD_AC;
	}

	if (do_rel_wr)
		mmc_apply_rel_rw(brq, card, req);

	/*
	 * Data tag is used only during writing meta data to speed
	 * up write and any subsequent read of this meta data
	 */
	do_data_tag = (card->ext_csd.data_tag_unit_size) &&
		(req->cmd_flags & REQ_META) &&
		(rq_data_dir(req) == WRITE) &&
		((brq->data.blocks * brq->data.blksz) >=
		 card->ext_csd.data_tag_unit_size);

	/*
	 * Pre-defined multi-block transfers are preferable to
	 * open-ended ones (and necessary for reliable writes).
	 * However, it is not sufficient to just send CMD23,
	 * and avoid the final CMD12, as on an error condition
	 * CMD12 (stop) needs to be sent anyway. This, coupled
	 * with Auto-CMD23 enhancements provided by some
	 * hosts, means that the complexity of dealing
	 * with this is best left to the host. If CMD23 is
	 * supported by card and host, we'll fill sbc in and let
	 * the host deal with handling it correctly. This means
	 * that for hosts that don't expose MMC_CAP_CMD23, no
	 * change of behavior will be observed.
	 *
	 * N.B: Some MMC cards experience perf degradation.
	 * We'll avoid using CMD23-bounded multiblock writes for
	 * these, while retaining features like reliable writes.
	 */
	if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
	    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
	     do_data_tag)) {
		brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
		brq->sbc.arg = brq->data.blocks |
			(do_rel_wr ? (1 << 31) : 0) |
			(do_data_tag ? (1 << 29) : 0);
		brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
		brq->mrq.sbc = &brq->sbc;
	}

	mmc_set_data_timeout(&brq->data, card);

	brq->data.sg = mqrq->sg;
	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

	/*
	 * Adjust the sg list so it is the same size as the
	 * request: the transfer may have been shrunk (e.g. to a
	 * single block after a read error), so clip the scatterlist
	 * where the adjusted byte count runs out.
	 */
	if (brq->data.blocks != blk_rq_sectors(req)) {
		int i, data_size = brq->data.blocks << 9;
		struct scatterlist *sg;

		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
			data_size -= sg->length;
			if (data_size <= 0) {
				sg->length += data_size;
				i++;
				break;
			}
		}
		brq->data.sg_len = i;
	}

	mqrq->mmc_active.mrq = &brq->mrq;
	mqrq->mmc_active.err_check = mmc_blk_err_check;

	mmc_queue_bounce_pre(mqrq);
}

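/*
 * The packed command header occupies one "large sector" (4 KiB on
 * cards with 4 KiB data sectors, 512 bytes otherwise); compute how many
 * scatterlist segments that header consumes given the queue's maximum
 * segment size.
 */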
static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
					  struct mmc_card *card)
{
	unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
	unsigned int max_seg_sz = queue_max_segment_size(q);
	unsigned int len, nr_segs = 0;

	do {
		len = min(hdr_sz, max_seg_sz);
		hdr_sz -= len;
		nr_segs++;
	} while (hdr_sz);

	return nr_segs;
}

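/*
 * Try to collect consecutive write requests from the queue into a
 * single packed command.  Collection stops at the first request that
 * cannot join (wrong direction, special request, alignment, or an
 * exhausted sector/segment budget); returns the number of packed
 * entries, or 0 if the request should be issued on its own.
 */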
static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
{
	struct request_queue *q = mq->queue;
	struct mmc_card *card = mq->card;
	struct request *cur = req, *next = NULL;
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_queue_req *mqrq = mq->mqrq_cur;
	bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
	unsigned int req_sectors = 0, phys_segments = 0;
	unsigned int max_blk_count, max_phys_segs;
	bool put_back = true;
	u8 max_packed_rw = 0;
	u8 reqs = 0;

	/*
	 * If no packed structure was allocated, set MMC_PACKED_NONE and
	 * return zero reqs; nothing needs to check for packed support
	 * after that.  Also clear the MMC_BLK_PACKED_CMD flag so we do
	 * not try again when removing the blk request.
	 */
	if (!mqrq->packed) {
		md->flags &= (~MMC_BLK_PACKED_CMD);
		goto no_packed;
	}

	if (!(md->flags & MMC_BLK_PACKED_CMD))
		goto no_packed;

	if ((rq_data_dir(cur) == WRITE) &&
	    mmc_host_packed_wr(card->host))
		max_packed_rw = card->ext_csd.max_packed_writes;

	if (max_packed_rw == 0)
		goto no_packed;

	if (mmc_req_rel_wr(cur) &&
	    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
		goto no_packed;

	if (mmc_large_sector(card) &&
	    !IS_ALIGNED(blk_rq_sectors(cur), 8))
		goto no_packed;

	mmc_blk_clear_packed(mqrq);

	max_blk_count = min(card->host->max_blk_count,
			    card->host->max_req_size >> 9);
	if (unlikely(max_blk_count > 0xffff))
		max_blk_count = 0xffff;

	max_phys_segs = queue_max_segments(q);
	req_sectors += blk_rq_sectors(cur);
	phys_segments += cur->nr_phys_segments;

	if (rq_data_dir(cur) == WRITE) {
		req_sectors += mmc_large_sector(card) ? 8 : 1;
		phys_segments += mmc_calc_packed_hdr_segs(q, card);
	}

	do {
		if (reqs >= max_packed_rw - 1) {
			put_back = false;
			break;
		}

		spin_lock_irq(q->queue_lock);
		next = blk_fetch_request(q);
		spin_unlock_irq(q->queue_lock);
		if (!next) {
			put_back = false;
			break;
		}

		if (mmc_large_sector(card) &&
		    !IS_ALIGNED(blk_rq_sectors(next), 8))
			break;

		if (mmc_req_is_special(next))
			break;

		if (rq_data_dir(cur) != rq_data_dir(next))
			break;

		if (mmc_req_rel_wr(next) &&
		    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
			break;

		req_sectors += blk_rq_sectors(next);
		if (req_sectors > max_blk_count)
			break;

		phys_segments += next->nr_phys_segments;
		if (phys_segments > max_phys_segs)
			break;

		list_add_tail(&next->queuelist, &mqrq->packed->list);
		cur = next;
		reqs++;
	} while (1);

	if (put_back) {
		spin_lock_irq(q->queue_lock);
		blk_requeue_request(q, next);
		spin_unlock_irq(q->queue_lock);
	}

	if (reqs > 0) {
		list_add(&req->queuelist, &mqrq->packed->list);
		mqrq->packed->nr_entries = ++reqs;
		mqrq->packed->retries = reqs;
		return reqs;
	}

no_packed:
	mqrq->cmd_type = MMC_PACKED_NONE;
	return 0;
}

static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
					struct mmc_card *card,
					struct mmc_queue *mq)
{
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mqrq->req;
	struct request *prq;
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_packed *packed = mqrq->packed;
	bool do_rel_wr, do_data_tag;
	__le32 *packed_cmd_hdr;
	u8 hdr_blocks;
	u8 i = 1;

	mqrq->cmd_type = MMC_PACKED_WRITE;
	packed->blocks = 0;
	packed->idx_failure = MMC_PACKED_NR_IDX;

	packed_cmd_hdr = packed->cmd_hdr;
	memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
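	/*
	 * Packed header word 0: byte 0 is the packed command version,
	 * byte 1 the direction (read/write) and bytes 2-3 the number of
	 * entries in the group.
	 */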
f68381a7
TK
1843 packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) |
1844 (PACKED_CMD_WR << 8) | PACKED_CMD_VER);
ce39f9d1
SJ
1845 hdr_blocks = mmc_large_sector(card) ? 8 : 1;
1846
1847 /*
1848 * Arguments for each entry of the packed group
1849 */
1850 list_for_each_entry(prq, &packed->list, queuelist) {
1851 do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
1852 do_data_tag = (card->ext_csd.data_tag_unit_size) &&
1853 (prq->cmd_flags & REQ_META) &&
1854 (rq_data_dir(prq) == WRITE) &&
d806b46e 1855 blk_rq_bytes(prq) >= card->ext_csd.data_tag_unit_size;
ce39f9d1 1856 /* Argument of CMD23 */
f68381a7 1857 packed_cmd_hdr[(i * 2)] = cpu_to_le32(
ce39f9d1
SJ
1858 (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
1859 (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
f68381a7 1860 blk_rq_sectors(prq));
ce39f9d1 1861 /* Argument of CMD18 or CMD25 */
f68381a7 1862 packed_cmd_hdr[((i * 2)) + 1] = cpu_to_le32(
ce39f9d1 1863 mmc_card_blockaddr(card) ?
f68381a7 1864 blk_rq_pos(prq) : blk_rq_pos(prq) << 9);
ce39f9d1
SJ
1865 packed->blocks += blk_rq_sectors(prq);
1866 i++;
1867 }
1868
1869 memset(brq, 0, sizeof(struct mmc_blk_request));
1870 brq->mrq.cmd = &brq->cmd;
1871 brq->mrq.data = &brq->data;
1872 brq->mrq.sbc = &brq->sbc;
1873 brq->mrq.stop = &brq->stop;
1874
1875 brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
1876 brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
1877 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
1878
1879 brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
1880 brq->cmd.arg = blk_rq_pos(req);
1881 if (!mmc_card_blockaddr(card))
1882 brq->cmd.arg <<= 9;
1883 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
1884
1885 brq->data.blksz = 512;
1886 brq->data.blocks = packed->blocks + hdr_blocks;
f53f1102 1887 brq->data.flags = MMC_DATA_WRITE;
ce39f9d1
SJ
1888
1889 brq->stop.opcode = MMC_STOP_TRANSMISSION;
1890 brq->stop.arg = 0;
1891 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1892
1893 mmc_set_data_timeout(&brq->data, card);
1894
1895 brq->data.sg = mqrq->sg;
1896 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
1897
1898 mqrq->mmc_active.mrq = &brq->mrq;
1899 mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
1900
1901 mmc_queue_bounce_pre(mqrq);
1902}
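
/*
 * [Illustrative sketch -- not part of block.c] A standalone program
 * showing the packed WRITE header layout built above: word 0 carries
 * the entry count (bits 31:16), the R/W flag (bits 15:8) and the
 * version (bits 7:0); each following pair of words holds the CMD23
 * argument (flags | sector count) and the CMD25 argument (block
 * address) for one packed request. Values and names are invented for
 * the demo.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PACKED_CMD_VER 0x01
#define DEMO_PACKED_CMD_WR  0x02

int main(void)
{
	/* Two packed writes: 8 sectors @ 0x1000 and 16 sectors @ 0x2000. */
	uint32_t sectors[2] = { 8, 16 };
	uint32_t addr[2] = { 0x1000, 0x2000 };
	uint32_t hdr[8] = { 0 };

	hdr[0] = (2u << 16) | (DEMO_PACKED_CMD_WR << 8) | DEMO_PACKED_CMD_VER;
	for (int i = 1; i <= 2; i++) {
		hdr[i * 2] = sectors[i - 1];     /* CMD23 argument */
		hdr[i * 2 + 1] = addr[i - 1];    /* CMD25 argument */
	}

	for (int i = 0; i < 6; i++)
		printf("packed_cmd_hdr[%d] = 0x%08x\n", i, hdr[i]);
	return 0;
}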
1903
67716327
AH
1904static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
1905 struct mmc_blk_request *brq, struct request *req,
1906 int ret)
1907{
ce39f9d1
SJ
1908 struct mmc_queue_req *mq_rq;
1909 mq_rq = container_of(brq, struct mmc_queue_req, brq);
1910
67716327
AH
1911 /*
1912 * If this is an SD card and we're writing, we can first
1913 * mark the known good sectors as ok.
1914 *
1915 * If the card is not SD, we can still acknowledge written sectors
1916 * as reported by the controller (which might be less than
1917 * the real number of written sectors, but never more).
1918 */
1919 if (mmc_card_sd(card)) {
1920 u32 blocks;
1921
1922 blocks = mmc_sd_num_wr_blocks(card);
1923 if (blocks != (u32)-1) {
ecf8b5d0 1924 ret = blk_end_request(req, 0, blocks << 9);
67716327
AH
1925 }
1926 } else {
ce39f9d1
SJ
1927 if (!mmc_packed_cmd(mq_rq->cmd_type))
1928 ret = blk_end_request(req, 0, brq->data.bytes_xfered);
67716327
AH
1929 }
1930 return ret;
1931}
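
/*
 * [Illustrative sketch -- not part of block.c] The recovery rule above
 * in isolation: for SD, ask the card how many blocks it actually wrote
 * (mmc_sd_num_wr_blocks() above, modeled here by a hypothetical
 * callback) and complete that many bytes; otherwise fall back to the
 * controller's bytes_xfered, which may undercount but never overcounts.
 */
#include <stdbool.h>
#include <stdint.h>

#define DEMO_INVALID_BLOCKS ((uint32_t)-1)

static uint64_t demo_bytes_to_complete(bool is_sd,
				       uint32_t (*query_wr_blocks)(void),
				       uint64_t bytes_xfered)
{
	if (is_sd) {
		uint32_t blocks = query_wr_blocks();
		if (blocks != DEMO_INVALID_BLOCKS)
			return (uint64_t)blocks << 9;  /* blocks -> bytes */
		return 0;                              /* query failed */
	}
	return bytes_xfered;                           /* controller count */
}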
1932
ce39f9d1
SJ
1933static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
1934{
1935 struct request *prq;
1936 struct mmc_packed *packed = mq_rq->packed;
1937 int idx = packed->idx_failure, i = 0;
1938 int ret = 0;
1939
ce39f9d1
SJ
1940 while (!list_empty(&packed->list)) {
1941 prq = list_entry_rq(packed->list.next);
1942 if (idx == i) {
1943 /* retry from error index */
1944 packed->nr_entries -= idx;
1945 mq_rq->req = prq;
1946 ret = 1;
1947
1948 if (packed->nr_entries == MMC_PACKED_NR_SINGLE) {
1949 list_del_init(&prq->queuelist);
1950 mmc_blk_clear_packed(mq_rq);
1951 }
1952 return ret;
1953 }
1954 list_del_init(&prq->queuelist);
1955 blk_end_request(prq, 0, blk_rq_bytes(prq));
1956 i++;
1957 }
1958
1959 mmc_blk_clear_packed(mq_rq);
1960 return ret;
1961}
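
/*
 * [Illustrative sketch -- not part of block.c] Partial completion of a
 * packed group, as implemented above: entries before idx_failure are
 * completed successfully, and the remainder of the group is kept for a
 * retry starting at the failed entry. Plain counters stand in for the
 * kernel request list.
 */
#include <stdio.h>

int main(void)
{
	int nr_entries = 5, idx_failure = 2;

	for (int i = 0; i < nr_entries; i++) {
		if (i == idx_failure) {
			printf("retry %d entries from index %d\n",
			       nr_entries - idx_failure, i);
			break;
		}
		printf("complete entry %d\n", i);  /* blk_end_request(0) */
	}
	return 0;
}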
1962
1963static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
1964{
1965 struct request *prq;
1966 struct mmc_packed *packed = mq_rq->packed;
1967
ce39f9d1
SJ
1968 while (!list_empty(&packed->list)) {
1969 prq = list_entry_rq(packed->list.next);
1970 list_del_init(&prq->queuelist);
1971 blk_end_request(prq, -EIO, blk_rq_bytes(prq));
1972 }
1973
1974 mmc_blk_clear_packed(mq_rq);
1975}
1976
1977static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
1978 struct mmc_queue_req *mq_rq)
1979{
1980 struct request *prq;
1981 struct request_queue *q = mq->queue;
1982 struct mmc_packed *packed = mq_rq->packed;
1983
ce39f9d1
SJ
1984 while (!list_empty(&packed->list)) {
1985 prq = list_entry_rq(packed->list.prev);
1986 if (prq->queuelist.prev != &packed->list) {
1987 list_del_init(&prq->queuelist);
1988 spin_lock_irq(q->queue_lock);
1989 blk_requeue_request(mq->queue, prq);
1990 spin_unlock_irq(q->queue_lock);
1991 } else {
1992 list_del_init(&prq->queuelist);
1993 }
1994 }
1995
1996 mmc_blk_clear_packed(mq_rq);
1997}
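
/*
 * [Illustrative sketch -- not part of block.c] Why the revert above
 * walks the packed list tail-first (list.prev): blk_requeue_request()
 * pushes each request back to the head of the queue, so requeuing in
 * reverse order restores the original front-to-back dispatch order.
 * Modeled here with a plain array prepend.
 */
#include <stdio.h>

int main(void)
{
	int packed[3] = { 1, 2, 3 };  /* original dispatch order */
	int queue[3];
	int head = 3;                 /* queue starts empty */

	/* Walk the packed list tail-first, prepending to the queue. */
	for (int i = 2; i >= 0; i--)
		queue[--head] = packed[i];

	for (int i = 0; i < 3; i++)
		printf("queue[%d] = %d\n", i, queue[i]);  /* 1, 2, 3 */
	return 0;
}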
1998
ee8a43a5 1999static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
54d49d77 2000{
7db3028e 2001 struct mmc_blk_data *md = mq->blkdata;
54d49d77
PF
2002 struct mmc_card *card = md->queue.card;
2003 struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
b8360a49 2004 int ret = 1, disable_multi = 0, retry = 0, type, retune_retry_done = 0;
d78d4a8a 2005 enum mmc_blk_status status;
ee8a43a5 2006 struct mmc_queue_req *mq_rq;
a5075eb9 2007 struct request *req = rqc;
ee8a43a5 2008 struct mmc_async_req *areq;
ce39f9d1
SJ
2009 const u8 packed_nr = 2;
2010 u8 reqs = 0;
1da177e4 2011
ee8a43a5
PF
2012 if (!rqc && !mq->mqrq_prev->req)
2013 return 0;
98ccf149 2014
ce39f9d1
SJ
2015 if (rqc)
2016 reqs = mmc_blk_prep_packed_list(mq, rqc);
2017
ee8a43a5
PF
2018 do {
2019 if (rqc) {
a5075eb9
SD
2020 /*
2021 * When 4KB native sector size is enabled, only reads and
2022 * writes that are a multiple of 8 blocks are allowed.
2023 */
e87c8561
YJ
2024 if (mmc_large_sector(card) &&
2025 !IS_ALIGNED(blk_rq_sectors(rqc), 8)) {
a5075eb9
SD
2026 pr_err("%s: Transfer size is not 4KB sector size aligned\n",
2027 req->rq_disk->disk_name);
ce39f9d1 2028 mq_rq = mq->mqrq_cur;
a5075eb9
SD
2029 goto cmd_abort;
2030 }
ce39f9d1
SJ
2031
2032 if (reqs >= packed_nr)
2033 mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
2034 card, mq);
2035 else
2036 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
ee8a43a5
PF
2037 areq = &mq->mqrq_cur->mmc_active;
2038 } else
2039 areq = NULL;
8e8b3f51 2040 areq = mmc_start_req(card->host, areq, &status);
2220eedf
KD
2041 if (!areq) {
2042 if (status == MMC_BLK_NEW_REQUEST)
2043 mq->flags |= MMC_QUEUE_NEW_REQUEST;
ee8a43a5 2044 return 0;
2220eedf 2045 }
ee8a43a5
PF
2046
2047 mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
2048 brq = &mq_rq->brq;
2049 req = mq_rq->req;
67716327 2050 type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
ee8a43a5 2051 mmc_queue_bounce_post(mq_rq);
98ccf149 2052
d78d4a8a
PF
2053 switch (status) {
2054 case MMC_BLK_SUCCESS:
2055 case MMC_BLK_PARTIAL:
2056 /*
2057 * A block was successfully transferred.
2058 */
67716327 2059 mmc_blk_reset_success(md, type);
ce39f9d1
SJ
2060
2061 if (mmc_packed_cmd(mq_rq->cmd_type)) {
2062 ret = mmc_blk_end_packed_req(mq_rq);
2063 break;
2064 } else {
2065 ret = blk_end_request(req, 0,
d78d4a8a 2066 brq->data.bytes_xfered);
ce39f9d1
SJ
2067 }
2068
67716327
AH
2069 /*
2070 * If the blk_end_request function returns non-zero even
2071 * though all data has been transferred and no errors
2072 * were returned by the host controller, it's a bug.
2073 */
ee8a43a5 2074 if (status == MMC_BLK_SUCCESS && ret) {
a3c76eb9 2075 pr_err("%s BUG rq_tot %d d_xfer %d\n",
ee8a43a5
PF
2076 __func__, blk_rq_bytes(req),
2077 brq->data.bytes_xfered);
2078 rqc = NULL;
2079 goto cmd_abort;
2080 }
d78d4a8a
PF
2081 break;
2082 case MMC_BLK_CMD_ERR:
67716327 2083 ret = mmc_blk_cmd_err(md, card, brq, req, ret);
29535f7b
DW
2084 if (mmc_blk_reset(md, card->host, type))
2085 goto cmd_abort;
2086 if (!ret)
2087 goto start_new_req;
2088 break;
d78d4a8a 2089 case MMC_BLK_RETRY:
b8360a49 2090 retune_retry_done = brq->retune_retry_done;
d78d4a8a 2091 if (retry++ < 5)
a01f3ccf 2092 break;
67716327 2093 /* Fall through */
d78d4a8a 2094 case MMC_BLK_ABORT:
67716327
AH
2095 if (!mmc_blk_reset(md, card->host, type))
2096 break;
4c2b8f26 2097 goto cmd_abort;
67716327
AH
2098 case MMC_BLK_DATA_ERR: {
2099 int err;
2100
2101 err = mmc_blk_reset(md, card->host, type);
2102 if (!err)
2103 break;
ce39f9d1
SJ
2104 if (err == -ENODEV ||
2105 mmc_packed_cmd(mq_rq->cmd_type))
67716327
AH
2106 goto cmd_abort;
2107 /* Fall through */
2108 }
2109 case MMC_BLK_ECC_ERR:
2110 if (brq->data.blocks > 1) {
2111 /* Redo read one sector at a time */
6606110d
JP
2112 pr_warn("%s: retrying using single block read\n",
2113 req->rq_disk->disk_name);
67716327
AH
2114 disable_multi = 1;
2115 break;
2116 }
d78d4a8a
PF
2117 /*
2118 * After an error, we redo I/O one sector at a
2119 * time, so we only reach here after trying to
2120 * read a single sector.
2121 */
ecf8b5d0 2122 ret = blk_end_request(req, -EIO,
d78d4a8a 2123 brq->data.blksz);
ee8a43a5
PF
2124 if (!ret)
2125 goto start_new_req;
d78d4a8a 2126 break;
a8ad82cc
SRT
2127 case MMC_BLK_NOMEDIUM:
2128 goto cmd_abort;
2220eedf
KD
2129 default:
2130 pr_err("%s: Unhandled return value (%d)",
2131 req->rq_disk->disk_name, status);
2132 goto cmd_abort;
4c2b8f26
RKAL
2133 }
2134
ee8a43a5 2135 if (ret) {
ce39f9d1
SJ
2136 if (mmc_packed_cmd(mq_rq->cmd_type)) {
2137 if (!mq_rq->packed->retries)
2138 goto cmd_abort;
2139 mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
2140 mmc_start_req(card->host,
2141 &mq_rq->mmc_active, NULL);
2142 } else {
2143
2144 /*
2145 * In case of an incomplete request,
2146 * prepare it again and resend.
2147 */
2148 mmc_blk_rw_rq_prep(mq_rq, card,
2149 disable_multi, mq);
2150 mmc_start_req(card->host,
2151 &mq_rq->mmc_active, NULL);
2152 }
b8360a49 2153 mq_rq->brq.retune_retry_done = retune_retry_done;
ee8a43a5 2154 }
1da177e4
LT
2155 } while (ret);
2156
1da177e4
LT
2157 return 1;
2158
a01f3ccf 2159 cmd_abort:
ce39f9d1
SJ
2160 if (mmc_packed_cmd(mq_rq->cmd_type)) {
2161 mmc_blk_abort_packed_req(mq_rq);
2162 } else {
2163 if (mmc_card_removed(card))
2164 req->cmd_flags |= REQ_QUIET;
2165 while (ret)
2166 ret = blk_end_request(req, -EIO,
2167 blk_rq_cur_bytes(req));
2168 }
1da177e4 2169
ee8a43a5
PF
2170 start_new_req:
2171 if (rqc) {
7a81902f
SJ
2172 if (mmc_card_removed(card)) {
2173 rqc->cmd_flags |= REQ_QUIET;
2174 blk_end_request_all(rqc, -EIO);
2175 } else {
ce39f9d1
SJ
2176 /*
2177 * If the current request is packed, it needs to be put back.
2178 */
2179 if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
2180 mmc_blk_revert_packed_req(mq, mq->mqrq_cur);
2181
7a81902f
SJ
2182 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
2183 mmc_start_req(card->host,
2184 &mq->mqrq_cur->mmc_active, NULL);
2185 }
ee8a43a5
PF
2186 }
2187
1da177e4
LT
2188 return 0;
2189}
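
/*
 * [Illustrative sketch -- not part of block.c] The two-slot pipelining
 * driving the do/while loop above: mmc_start_req() issues the newly
 * prepared request (if any), waits for the previously issued one, and
 * returns it for post-processing, so preparation of request N+1
 * overlaps the transfer of request N. demo_start_req() is a
 * hypothetical stand-in with the same contract.
 */
#include <stddef.h>
#include <stdio.h>

struct demo_areq { int id; };

static struct demo_areq *demo_start_req(struct demo_areq **in_flight,
					struct demo_areq *next)
{
	struct demo_areq *done = *in_flight; /* wait for current transfer */
	*in_flight = next;                   /* issue the prepared request */
	return done;                         /* NULL on the very first call */
}

int main(void)
{
	struct demo_areq reqs[3] = { { 1 }, { 2 }, { 3 } };
	struct demo_areq *in_flight = NULL, *done;

	for (int i = 0; i < 3; i++) {
		done = demo_start_req(&in_flight, &reqs[i]);
		if (done)
			printf("post-process request %d\n", done->id);
	}
	done = demo_start_req(&in_flight, NULL); /* drain the pipeline */
	printf("post-process request %d\n", done->id);
	return 0;
}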
2190
29eb7bd0 2191int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
bd788c96 2192{
1a258db6 2193 int ret;
7db3028e 2194 struct mmc_blk_data *md = mq->blkdata;
1a258db6 2195 struct mmc_card *card = md->queue.card;
2220eedf
KD
2196 struct mmc_host *host = card->host;
2197 unsigned long flags;
869c5548 2198 bool req_is_special = mmc_req_is_special(req);
1a258db6 2199
ee8a43a5
PF
2200 if (req && !mq->mqrq_prev->req)
2201 /* claim host only for the first request */
e94cfef6 2202 mmc_get_card(card);
ee8a43a5 2203
371a689f
AW
2204 ret = mmc_blk_part_switch(card, md);
2205 if (ret) {
0d7d85ca 2206 if (req) {
ecf8b5d0 2207 blk_end_request_all(req, -EIO);
0d7d85ca 2208 }
371a689f
AW
2209 ret = 0;
2210 goto out;
2211 }
1a258db6 2212
2220eedf 2213 mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
c2df40df 2214 if (req && req_op(req) == REQ_OP_DISCARD) {
ee8a43a5
PF
2215 /* complete ongoing async transfer before issuing discard */
2216 if (card->host->areq)
2217 mmc_blk_issue_rw_rq(mq, NULL);
288dab8a
CH
2218 ret = mmc_blk_issue_discard_rq(mq, req);
2219 } else if (req && req_op(req) == REQ_OP_SECURE_ERASE) {
2220 /* complete ongoing async transfer before issuing secure erase */
2221 if (card->host->areq)
2222 mmc_blk_issue_rw_rq(mq, NULL);
2223 ret = mmc_blk_issue_secdiscard_rq(mq, req);
3a5e02ce 2224 } else if (req && req_op(req) == REQ_OP_FLUSH) {
393f9a08
JC
2225 /* complete ongoing async transfer before issuing flush */
2226 if (card->host->areq)
2227 mmc_blk_issue_rw_rq(mq, NULL);
1a258db6 2228 ret = mmc_blk_issue_flush(mq, req);
49804548 2229 } else {
2220eedf
KD
2230 if (!req && host->areq) {
2231 spin_lock_irqsave(&host->context_info.lock, flags);
2232 host->context_info.is_waiting_last_req = true;
2233 spin_unlock_irqrestore(&host->context_info.lock, flags);
2234 }
1a258db6 2235 ret = mmc_blk_issue_rw_rq(mq, req);
49804548 2236 }
1a258db6 2237
371a689f 2238out:
869c5548 2239 if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) || req_is_special)
ef3a69c7
SJ
2240 /*
2241 * Release host when there are no more requests
2242 * and after a special request (discard, flush) is done.
2243 * For a special request there is no reentry to
2244 * the 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
2245 */
e94cfef6 2246 mmc_put_card(card);
1a258db6 2247 return ret;
bd788c96 2248}
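
/*
 * [Illustrative sketch -- not part of block.c] The dispatch policy of
 * mmc_blk_issue_rq() above, reduced to its decision: discard, secure
 * erase and flush must first drain any in-flight async transfer, while
 * ordinary reads and writes go straight into the async path. The enum
 * and function names are invented for the demo.
 */
#include <stdbool.h>

enum demo_op { DEMO_OP_READ, DEMO_OP_WRITE, DEMO_OP_DISCARD,
	       DEMO_OP_SECURE_ERASE, DEMO_OP_FLUSH };

/* Special requests cannot be interleaved with async transfers. */
static bool demo_must_drain_async(enum demo_op op)
{
	return op == DEMO_OP_DISCARD ||
	       op == DEMO_OP_SECURE_ERASE ||
	       op == DEMO_OP_FLUSH;
}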
1da177e4 2249
a6f6c96b
RK
2250static inline int mmc_blk_readonly(struct mmc_card *card)
2251{
2252 return mmc_card_readonly(card) ||
2253 !(card->csd.cmdclass & CCC_BLOCK_WRITE);
2254}
2255
371a689f
AW
2256static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
2257 struct device *parent,
2258 sector_t size,
2259 bool default_ro,
add710ea
JR
2260 const char *subname,
2261 int area_type)
1da177e4
LT
2262{
2263 struct mmc_blk_data *md;
2264 int devidx, ret;
2265
b10fa99e
UH
2266again:
2267 if (!ida_pre_get(&mmc_blk_ida, GFP_KERNEL))
2268 return ERR_PTR(-ENOMEM);
2269
2270 spin_lock(&mmc_blk_lock);
2271 ret = ida_get_new(&mmc_blk_ida, &devidx);
2272 spin_unlock(&mmc_blk_lock);
2273
2274 if (ret == -EAGAIN)
2275 goto again;
2276 else if (ret)
2277 return ERR_PTR(ret);
2278
2279 if (devidx >= max_devices) {
2280 ret = -ENOSPC;
2281 goto out;
2282 }
1da177e4 2283
dd00cc48 2284 md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
a6f6c96b
RK
2285 if (!md) {
2286 ret = -ENOMEM;
2287 goto out;
2288 }
1da177e4 2289
add710ea
JR
2290 md->area_type = area_type;
2291
a6f6c96b
RK
2292 /*
2293 * Set the read-only status based on the supported commands
2294 * and the write protect switch.
2295 */
2296 md->read_only = mmc_blk_readonly(card);
1da177e4 2297
5e71b7a6 2298 md->disk = alloc_disk(perdev_minors);
a6f6c96b
RK
2299 if (md->disk == NULL) {
2300 ret = -ENOMEM;
2301 goto err_kfree;
2302 }
1da177e4 2303
a6f6c96b 2304 spin_lock_init(&md->lock);
371a689f 2305 INIT_LIST_HEAD(&md->part);
a6f6c96b 2306 md->usage = 1;
1da177e4 2307
d09408ad 2308 ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
a6f6c96b
RK
2309 if (ret)
2310 goto err_putdisk;
1da177e4 2311
7db3028e 2312 md->queue.blkdata = md;
d2b18394 2313
fe6b4c88 2314 md->disk->major = MMC_BLOCK_MAJOR;
5e71b7a6 2315 md->disk->first_minor = devidx * perdev_minors;
a6f6c96b
RK
2316 md->disk->fops = &mmc_bdops;
2317 md->disk->private_data = md;
2318 md->disk->queue = md->queue.queue;
307d8e6f 2319 md->parent = parent;
371a689f 2320 set_disk_ro(md->disk, md->read_only || default_ro);
382c55f8 2321 md->disk->flags = GENHD_FL_EXT_DEVT;
f5b4d71f 2322 if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
53d8f974 2323 md->disk->flags |= GENHD_FL_NO_PART_SCAN;
a6f6c96b
RK
2324
2325 /*
2326 * As discussed on lkml, GENHD_FL_REMOVABLE should:
2327 *
2328 * - be set for removable media with permanent block devices
2329 * - be unset for removable block devices with permanent media
2330 *
2331 * Since MMC block devices clearly fall under the second
2332 * case, we do not set GENHD_FL_REMOVABLE. Userspace
2333 * should use the block device creation/destruction hotplug
2334 * messages to tell when the card is present.
2335 */
2336
f06c9153 2337 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
9aaf3437 2338 "mmcblk%u%s", card->host->index, subname ? subname : "");
a6f6c96b 2339
a5075eb9
SD
2340 if (mmc_card_mmc(card))
2341 blk_queue_logical_block_size(md->queue.queue,
2342 card->ext_csd.data_sector_size);
2343 else
2344 blk_queue_logical_block_size(md->queue.queue, 512);
2345
371a689f 2346 set_capacity(md->disk, size);
d0c97cfb 2347
f0d89972 2348 if (mmc_host_cmd23(card->host)) {
0ed50abb
DG
2349 if ((mmc_card_mmc(card) &&
2350 card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
f0d89972
AW
2351 (mmc_card_sd(card) &&
2352 card->scr.cmds & SD_SCR_CMD23_SUPPORT))
2353 md->flags |= MMC_BLK_CMD23;
2354 }
d0c97cfb
AW
2355
2356 if (mmc_card_mmc(card) &&
2357 md->flags & MMC_BLK_CMD23 &&
2358 ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
2359 card->ext_csd.rel_sectors)) {
2360 md->flags |= MMC_BLK_REL_WR;
e9d5c746 2361 blk_queue_write_cache(md->queue.queue, true, true);
d0c97cfb
AW
2362 }
2363
ce39f9d1
SJ
2364 if (mmc_card_mmc(card) &&
2365 (area_type == MMC_BLK_DATA_AREA_MAIN) &&
2366 (md->flags & MMC_BLK_CMD23) &&
2367 card->ext_csd.packed_event_en) {
2368 if (!mmc_packed_init(&md->queue, card))
2369 md->flags |= MMC_BLK_PACKED_CMD;
2370 }
2371
371a689f
AW
2372 return md;
2373
2374 err_putdisk:
2375 put_disk(md->disk);
2376 err_kfree:
2377 kfree(md);
2378 out:
b10fa99e
UH
2379 spin_lock(&mmc_blk_lock);
2380 ida_remove(&mmc_blk_ida, devidx);
2381 spin_unlock(&mmc_blk_lock);
371a689f
AW
2382 return ERR_PTR(ret);
2383}
2384
2385static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
2386{
2387 sector_t size;
a6f6c96b 2388
85a18ad9
PO
2389 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
2390 /*
2391 * The EXT_CSD sector count is in number of 512 byte
2392 * sectors.
2393 */
371a689f 2394 size = card->ext_csd.sectors;
85a18ad9
PO
2395 } else {
2396 /*
2397 * The CSD capacity field is in units of 2^read_blkbits bytes.
2398 * set_capacity takes units of 512 bytes.
2399 */
087de9ed
KM
2400 size = (typeof(sector_t))card->csd.capacity
2401 << (card->csd.read_blkbits - 9);
85a18ad9 2402 }
371a689f 2403
7a30f2af 2404 return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
add710ea 2405 MMC_BLK_DATA_AREA_MAIN);
371a689f 2406}
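
/*
 * [Illustrative sketch -- not part of block.c] The capacity conversion
 * above as a worked example: a CSD capacity of 1000000 blocks with
 * read_blkbits = 10 (1 KB read blocks) yields
 * 1000000 << (10 - 9) = 2000000 sectors of 512 bytes, i.e. roughly
 * 976 MiB. The numbers are made up for the demo.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t csd_capacity = 1000000;  /* in 2^read_blkbits byte units */
	unsigned int read_blkbits = 10;

	uint64_t sectors = (uint64_t)csd_capacity << (read_blkbits - 9);
	printf("%llu sectors of 512B (%llu MiB)\n",
	       (unsigned long long)sectors,
	       (unsigned long long)(sectors / 2048));
	return 0;
}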
a6f6c96b 2407
371a689f
AW
2408static int mmc_blk_alloc_part(struct mmc_card *card,
2409 struct mmc_blk_data *md,
2410 unsigned int part_type,
2411 sector_t size,
2412 bool default_ro,
add710ea
JR
2413 const char *subname,
2414 int area_type)
371a689f
AW
2415{
2416 char cap_str[10];
2417 struct mmc_blk_data *part_md;
2418
2419 part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
add710ea 2420 subname, area_type);
371a689f
AW
2421 if (IS_ERR(part_md))
2422 return PTR_ERR(part_md);
2423 part_md->part_type = part_type;
2424 list_add(&part_md->part, &md->part);
2425
b9f28d86 2426 string_get_size((u64)get_capacity(part_md->disk), 512, STRING_UNITS_2,
371a689f 2427 cap_str, sizeof(cap_str));
a3c76eb9 2428 pr_info("%s: %s %s partition %u %s\n",
371a689f
AW
2429 part_md->disk->disk_name, mmc_card_id(card),
2430 mmc_card_name(card), part_md->part_type, cap_str);
2431 return 0;
2432}
2433
e0c368d5
NJ
2434/* MMC Physical partitions consist of two boot partitions and
2435 * up to four general purpose partitions.
2436 * For each partition enabled in EXT_CSD a block device will be allocated
2437 * to provide access to the partition.
2438 */
2439
371a689f
AW
2440static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
2441{
e0c368d5 2442 int idx, ret = 0;
371a689f
AW
2443
2444 if (!mmc_card_mmc(card))
2445 return 0;
2446
e0c368d5
NJ
2447 for (idx = 0; idx < card->nr_parts; idx++) {
2448 if (card->part[idx].size) {
2449 ret = mmc_blk_alloc_part(card, md,
2450 card->part[idx].part_cfg,
2451 card->part[idx].size >> 9,
2452 card->part[idx].force_ro,
add710ea
JR
2453 card->part[idx].name,
2454 card->part[idx].area_type);
e0c368d5
NJ
2455 if (ret)
2456 return ret;
2457 }
371a689f
AW
2458 }
2459
2460 return ret;
1da177e4
LT
2461}
2462
371a689f
AW
2463static void mmc_blk_remove_req(struct mmc_blk_data *md)
2464{
add710ea
JR
2465 struct mmc_card *card;
2466
371a689f 2467 if (md) {
fdfa20c1
PT
2468 /*
2469 * Flush remaining requests and free queues. It
2470 * is freeing the queue that stops new requests
2471 * from being accepted.
2472 */
8efb83a2 2473 card = md->queue.card;
fdfa20c1
PT
2474 mmc_cleanup_queue(&md->queue);
2475 if (md->flags & MMC_BLK_PACKED_CMD)
2476 mmc_packed_clean(&md->queue);
371a689f
AW
2477 if (md->disk->flags & GENHD_FL_UP) {
2478 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
add710ea
JR
2479 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
2480 card->ext_csd.boot_ro_lockable)
2481 device_remove_file(disk_to_dev(md->disk),
2482 &md->power_ro_lock);
371a689f 2483
371a689f
AW
2484 del_gendisk(md->disk);
2485 }
371a689f
AW
2486 mmc_blk_put(md);
2487 }
2488}
2489
2490static void mmc_blk_remove_parts(struct mmc_card *card,
2491 struct mmc_blk_data *md)
2492{
2493 struct list_head *pos, *q;
2494 struct mmc_blk_data *part_md;
2495
2496 list_for_each_safe(pos, q, &md->part) {
2497 part_md = list_entry(pos, struct mmc_blk_data, part);
2498 list_del(pos);
2499 mmc_blk_remove_req(part_md);
2500 }
2501}
2502
2503static int mmc_add_disk(struct mmc_blk_data *md)
2504{
2505 int ret;
add710ea 2506 struct mmc_card *card = md->queue.card;
371a689f 2507
307d8e6f 2508 device_add_disk(md->parent, md->disk);
371a689f
AW
2509 md->force_ro.show = force_ro_show;
2510 md->force_ro.store = force_ro_store;
641c3187 2511 sysfs_attr_init(&md->force_ro.attr);
371a689f
AW
2512 md->force_ro.attr.name = "force_ro";
2513 md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
2514 ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
2515 if (ret)
add710ea
JR
2516 goto force_ro_fail;
2517
2518 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
2519 card->ext_csd.boot_ro_lockable) {
88187398 2520 umode_t mode;
add710ea
JR
2521
2522 if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
2523 mode = S_IRUGO;
2524 else
2525 mode = S_IRUGO | S_IWUSR;
2526
2527 md->power_ro_lock.show = power_ro_lock_show;
2528 md->power_ro_lock.store = power_ro_lock_store;
00d9ac08 2529 sysfs_attr_init(&md->power_ro_lock.attr);
add710ea
JR
2530 md->power_ro_lock.attr.mode = mode;
2531 md->power_ro_lock.attr.name =
2532 "ro_lock_until_next_power_on";
2533 ret = device_create_file(disk_to_dev(md->disk),
2534 &md->power_ro_lock);
2535 if (ret)
2536 goto power_ro_lock_fail;
2537 }
2538 return ret;
2539
2540power_ro_lock_fail:
2541 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
2542force_ro_fail:
2543 del_gendisk(md->disk);
371a689f
AW
2544
2545 return ret;
2546}
2547
6f60c222
AW
2548static const struct mmc_fixup blk_fixups[] =
2549{
c59d4473
CB
2550 MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
2551 MMC_QUIRK_INAND_CMD38),
2552 MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
2553 MMC_QUIRK_INAND_CMD38),
2554 MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
2555 MMC_QUIRK_INAND_CMD38),
2556 MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
2557 MMC_QUIRK_INAND_CMD38),
2558 MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
2559 MMC_QUIRK_INAND_CMD38),
d0c97cfb
AW
2560
2561 /*
2562 * Some MMC cards experience performance degradation with CMD23
2563 * instead of CMD12-bounded multiblock transfers. For now we'll
2564 * blacklist what's bad...
2565 * - Certain Toshiba cards.
2566 *
2567 * N.B. This doesn't affect SD cards.
2568 */
7d70d476
YL
2569 MMC_FIXUP("SDMB-32", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
2570 MMC_QUIRK_BLK_NO_CMD23),
2571 MMC_FIXUP("SDM032", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
2572 MMC_QUIRK_BLK_NO_CMD23),
c59d4473 2573 MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
d0c97cfb 2574 MMC_QUIRK_BLK_NO_CMD23),
c59d4473 2575 MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
d0c97cfb 2576 MMC_QUIRK_BLK_NO_CMD23),
c59d4473 2577 MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
d0c97cfb 2578 MMC_QUIRK_BLK_NO_CMD23),
6de5fc9c
SNX
2579
2580 /*
32ecd320 2581 * Some MMC cards need a longer data read timeout than indicated in CSD.
6de5fc9c 2582 */
c59d4473 2583 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
6de5fc9c 2584 MMC_QUIRK_LONG_READ_TIME),
32ecd320
MG
2585 MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
2586 MMC_QUIRK_LONG_READ_TIME),
6de5fc9c 2587
3550ccdb
IC
2588 /*
2589 * On these Samsung MoviNAND parts, performing secure erase or
2590 * secure trim can result in unrecoverable corruption due to a
2591 * firmware bug.
2592 */
2593 MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2594 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2595 MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2596 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2597 MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2598 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2599 MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2600 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2601 MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2602 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2603 MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2604 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2605 MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2606 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2607 MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2608 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2609
b5b4ff0a
SL
2610 /*
2611 * On some Kingston eMMCs, performing trim can occasionally
2612 * result in unrecoverable data corruption due to a firmware bug.
2613 */
2614 MMC_FIXUP("V10008", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
2615 MMC_QUIRK_TRIM_BROKEN),
2616 MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
2617 MMC_QUIRK_TRIM_BROKEN),
2618
6f60c222
AW
2619 END_FIXUP
2620};
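
/*
 * [Illustrative sketch -- not part of block.c] How a fixup table like
 * blk_fixups above is applied, modeled in userspace: each entry
 * matches on the CID product name and manufacturer id (with wildcard
 * values) and, on a match, sets a quirk bit on the card. All demo_*
 * names are invented; the kernel side is mmc_fixup_device() driven by
 * MMC_FIXUP()/END_FIXUP entries.
 */
#include <string.h>

#define DEMO_QUIRK_NO_CMD23 (1u << 0)
#define DEMO_MANFID_ANY (-1)

struct demo_card { const char *cid_name; int manfid; unsigned int quirks; };

struct demo_fixup {
	const char *cid_name;   /* product name, or NULL for any */
	int manfid;             /* manufacturer id, or DEMO_MANFID_ANY */
	unsigned int quirk;     /* bit to set when the entry matches */
};

static void demo_fixup_card(struct demo_card *card,
			    const struct demo_fixup *table, int n)
{
	for (int i = 0; i < n; i++) {
		if (table[i].cid_name &&
		    strcmp(table[i].cid_name, card->cid_name))
			continue;
		if (table[i].manfid != DEMO_MANFID_ANY &&
		    table[i].manfid != card->manfid)
			continue;
		card->quirks |= table[i].quirk;
	}
}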
2621
96541bac 2622static int mmc_blk_probe(struct mmc_card *card)
1da177e4 2623{
371a689f 2624 struct mmc_blk_data *md, *part_md;
a7bbb573
PO
2625 char cap_str[10];
2626
912490db
PO
2627 /*
2628 * Check that the card supports the command class(es) we need.
2629 */
2630 if (!(card->csd.cmdclass & CCC_BLOCK_READ))
1da177e4
LT
2631 return -ENODEV;
2632
5204d00f
LC
2633 mmc_fixup_device(card, blk_fixups);
2634
1da177e4
LT
2635 md = mmc_blk_alloc(card);
2636 if (IS_ERR(md))
2637 return PTR_ERR(md);
2638
b9f28d86 2639 string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
a7bbb573 2640 cap_str, sizeof(cap_str));
a3c76eb9 2641 pr_info("%s: %s %s %s %s\n",
1da177e4 2642 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
a7bbb573 2643 cap_str, md->read_only ? "(ro)" : "");
1da177e4 2644
371a689f
AW
2645 if (mmc_blk_alloc_parts(card, md))
2646 goto out;
2647
96541bac 2648 dev_set_drvdata(&card->dev, md);
6f60c222 2649
371a689f
AW
2650 if (mmc_add_disk(md))
2651 goto out;
2652
2653 list_for_each_entry(part_md, &md->part, part) {
2654 if (mmc_add_disk(part_md))
2655 goto out;
2656 }
e94cfef6
UH
2657
2658 pm_runtime_set_autosuspend_delay(&card->dev, 3000);
2659 pm_runtime_use_autosuspend(&card->dev);
2660
2661 /*
2662 * Don't enable runtime PM for SD-combo cards here. Leave that
2663 * decision to be taken during the SDIO init sequence instead.
2664 */
2665 if (card->type != MMC_TYPE_SD_COMBO) {
2666 pm_runtime_set_active(&card->dev);
2667 pm_runtime_enable(&card->dev);
2668 }
2669
1da177e4
LT
2670 return 0;
2671
2672 out:
371a689f
AW
2673 mmc_blk_remove_parts(card, md);
2674 mmc_blk_remove_req(md);
5865f287 2675 return 0;
1da177e4
LT
2676}
2677
96541bac 2678static void mmc_blk_remove(struct mmc_card *card)
1da177e4 2679{
96541bac 2680 struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
1da177e4 2681
371a689f 2682 mmc_blk_remove_parts(card, md);
e94cfef6 2683 pm_runtime_get_sync(&card->dev);
ddd6fa7e
AH
2684 mmc_claim_host(card->host);
2685 mmc_blk_part_switch(card, md);
2686 mmc_release_host(card->host);
e94cfef6
UH
2687 if (card->type != MMC_TYPE_SD_COMBO)
2688 pm_runtime_disable(&card->dev);
2689 pm_runtime_put_noidle(&card->dev);
371a689f 2690 mmc_blk_remove_req(md);
96541bac 2691 dev_set_drvdata(&card->dev, NULL);
1da177e4
LT
2692}
2693
96541bac 2694static int _mmc_blk_suspend(struct mmc_card *card)
1da177e4 2695{
371a689f 2696 struct mmc_blk_data *part_md;
96541bac 2697 struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
1da177e4
LT
2698
2699 if (md) {
2700 mmc_queue_suspend(&md->queue);
371a689f
AW
2701 list_for_each_entry(part_md, &md->part, part) {
2702 mmc_queue_suspend(&part_md->queue);
2703 }
1da177e4
LT
2704 }
2705 return 0;
2706}
2707
96541bac 2708static void mmc_blk_shutdown(struct mmc_card *card)
76287748 2709{
96541bac 2710 _mmc_blk_suspend(card);
76287748
UH
2711}
2712
0967edc6
UH
2713#ifdef CONFIG_PM_SLEEP
2714static int mmc_blk_suspend(struct device *dev)
76287748 2715{
96541bac
UH
2716 struct mmc_card *card = mmc_dev_to_card(dev);
2717
2718 return _mmc_blk_suspend(card);
76287748
UH
2719}
2720
0967edc6 2721static int mmc_blk_resume(struct device *dev)
1da177e4 2722{
371a689f 2723 struct mmc_blk_data *part_md;
fc95e30b 2724 struct mmc_blk_data *md = dev_get_drvdata(dev);
1da177e4
LT
2725
2726 if (md) {
371a689f
AW
2727 /*
2728 * Resume involves the card going into idle state,
2729 * so the current partition is always the main one.
2730 */
2731 md->part_curr = md->part_type;
1da177e4 2732 mmc_queue_resume(&md->queue);
371a689f
AW
2733 list_for_each_entry(part_md, &md->part, part) {
2734 mmc_queue_resume(&part_md->queue);
2735 }
1da177e4
LT
2736 }
2737 return 0;
2738}
1da177e4
LT
2739#endif
2740
0967edc6
UH
2741static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume);
2742
96541bac
UH
2743static struct mmc_driver mmc_driver = {
2744 .drv = {
2745 .name = "mmcblk",
2746 .pm = &mmc_blk_pm_ops,
2747 },
1da177e4
LT
2748 .probe = mmc_blk_probe,
2749 .remove = mmc_blk_remove,
76287748 2750 .shutdown = mmc_blk_shutdown,
1da177e4
LT
2751};
2752
2753static int __init mmc_blk_init(void)
2754{
9d4e98e9 2755 int res;
1da177e4 2756
5e71b7a6
OJ
2757 if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
2758 pr_info("mmcblk: using %d minors per device\n", perdev_minors);
2759
a26eba61 2760 max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors);
5e71b7a6 2761
fe6b4c88
PO
2762 res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
2763 if (res)
1da177e4 2764 goto out;
1da177e4 2765
9d4e98e9
AM
2766 res = mmc_register_driver(&mmc_driver);
2767 if (res)
2768 goto out2;
1da177e4 2769
9d4e98e9
AM
2770 return 0;
2771 out2:
2772 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
1da177e4
LT
2773 out:
2774 return res;
2775}
2776
2777static void __exit mmc_blk_exit(void)
2778{
2779 mmc_unregister_driver(&mmc_driver);
fe6b4c88 2780 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
1da177e4
LT
2781}
2782
2783module_init(mmc_blk_init);
2784module_exit(mmc_blk_exit);
2785
2786MODULE_LICENSE("GPL");
2787MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
2788