/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 *          28 May 2002
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/pm_runtime.h>
#include <linux/idr.h>
#include <linux/debugfs.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <linux/uaccess.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"
#include "host.h"
#include "bus.h"
#include "mmc_ops.h"
#include "quirks.h"
#include "sd_ops.h"

MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."

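/*
 * Illustrative note: with this prefix, the parameters declared below
 * show up as "mmcblk.<param>", e.g. booting with mmcblk.perdev_minors=16
 * overrides the default set further down.
 */
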
/*
 * Set a 10 second timeout for polling write request busy state. Note, mmc core
 * is setting a 3 second timeout for SD cards, and SDHCI has long had a 10
 * second software timer to timeout the whole request, so 10 seconds should be
 * ample.
 */
#define MMC_BLK_TIMEOUT_MS  (10 * 1000)
#define MMC_SANITIZE_REQ_TIMEOUT 240000
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
#define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8)
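
/*
 * Worked example for the two macros above, which pick apart the argument
 * of an MMC_SWITCH command (layout: access[25:24] index[23:16] value[15:8]
 * cmdset[7:0]): for arg = 0x03B30100, a write to PARTITION_CONFIG
 * (EXT_CSD byte 179 == 0xB3), MMC_EXTRACT_INDEX_FROM_ARG() yields 0xB3
 * and MMC_EXTRACT_VALUE_FROM_ARG() yields 0x01.
 */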

#define mmc_req_rel_wr(req)	((req->cmd_flags & REQ_FUA) && \
				 (rq_data_dir(req) == WRITE))
static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so number of mmcblk devices is
 * limited to (1 << 20) / number of minors per device.  It is also
 * limited by the MAX_DEVICES below.
 */
static int max_devices;

#define MAX_DEVICES 256

static DEFINE_IDA(mmc_blk_ida);
static DEFINE_IDA(mmc_rpmb_ida);

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	spinlock_t	lock;
	struct device	*parent;
	struct gendisk	*disk;
	struct mmc_queue queue;
	struct list_head part;
	struct list_head rpmbs;

	unsigned int	flags;
#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */

	unsigned int	usage;
	unsigned int	read_only;
	unsigned int	part_type;
	unsigned int	reset_done;
#define MMC_BLK_READ		BIT(0)
#define MMC_BLK_WRITE		BIT(1)
#define MMC_BLK_DISCARD		BIT(2)
#define MMC_BLK_SECDISCARD	BIT(3)
#define MMC_BLK_CQE_RECOVERY	BIT(4)

	/*
	 * Only set in main mmc_blk_data associated
	 * with mmc_card with dev_set_drvdata, and keeps
	 * track of the current selected device partition.
	 */
	unsigned int	part_curr;
	struct device_attribute force_ro;
	struct device_attribute power_ro_lock;
	int	area_type;

	/* debugfs files (only in main mmc_blk_data) */
	struct dentry *status_dentry;
	struct dentry *ext_csd_dentry;
};

/* Device type for RPMB character devices */
static dev_t mmc_rpmb_devt;

/* Bus type for RPMB character devices */
static struct bus_type mmc_rpmb_bus_type = {
	.name = "mmc_rpmb",
};

/**
 * struct mmc_rpmb_data - special RPMB device type for these areas
 * @dev: the device for the RPMB area
 * @chrdev: character device for the RPMB area
 * @id: unique device ID number
 * @part_index: partition index (0 on first)
 * @md: parent MMC block device
 * @node: list item, so we can put this device on a list
 */
struct mmc_rpmb_data {
	struct device dev;
	struct cdev chrdev;
	int id;
	unsigned int part_index;
	struct mmc_blk_data *md;
	struct list_head node;
};

static DEFINE_MUTEX(open_lock);

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Number of minors to allocate per device");

static inline int mmc_blk_part_switch(struct mmc_card *card,
				      unsigned int part_type);

static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
	struct mmc_blk_data *md;

	mutex_lock(&open_lock);
	md = disk->private_data;
	if (md && md->usage == 0)
		md = NULL;
	if (md)
		md->usage++;
	mutex_unlock(&open_lock);

	return md;
}

static inline int mmc_get_devidx(struct gendisk *disk)
{
	int devidx = disk->first_minor / perdev_minors;
	return devidx;
}

static void mmc_blk_put(struct mmc_blk_data *md)
{
	mutex_lock(&open_lock);
	md->usage--;
	if (md->usage == 0) {
		int devidx = mmc_get_devidx(md->disk);
		blk_put_queue(md->queue.queue);
		ida_simple_remove(&mmc_blk_ida, devidx);
		put_disk(md->disk);
		kfree(md);
	}
	mutex_unlock(&open_lock);
}

static ssize_t power_ro_lock_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	struct mmc_card *card = md->queue.card;
	int locked = 0;

	if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
		locked = 2;
	else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
		locked = 1;

	ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);

	mmc_blk_put(md);

	return ret;
}

static ssize_t power_ro_lock_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	int ret;
	struct mmc_blk_data *md, *part_md;
	struct mmc_queue *mq;
	struct request *req;
	unsigned long set;

	if (kstrtoul(buf, 0, &set))
		return -EINVAL;

	if (set != 1)
		return count;

	md = mmc_blk_get(dev_to_disk(dev));
	mq = &md->queue;

	/* Dispatch locking to the block layer */
	req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, __GFP_RECLAIM);
	if (IS_ERR(req)) {
		count = PTR_ERR(req);
		goto out_put;
	}
	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
	blk_execute_rq(mq->queue, NULL, req, 0);
	ret = req_to_mmc_queue_req(req)->drv_op_result;
	blk_put_request(req);

	if (!ret) {
		pr_info("%s: Locking boot partition ro until next power on\n",
			md->disk->disk_name);
		set_disk_ro(md->disk, 1);

		list_for_each_entry(part_md, &md->part, part)
			if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
				pr_info("%s: Locking boot partition ro until next power on\n",
					part_md->disk->disk_name);
				set_disk_ro(part_md->disk, 1);
			}
	}
out_put:
	mmc_blk_put(md);
	return count;
}

static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

	ret = snprintf(buf, PAGE_SIZE, "%d\n",
		       get_disk_ro(dev_to_disk(dev)) ^
		       md->read_only);
	mmc_blk_put(md);
	return ret;
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	int ret;
	char *end;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	unsigned long set = simple_strtoul(buf, &end, 0);

	if (end == buf) {
		ret = -EINVAL;
		goto out;
	}

	set_disk_ro(dev_to_disk(dev), set || md->read_only);
	ret = count;
out:
	mmc_blk_put(md);
	return ret;
}

static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
	int ret = -ENXIO;

	mutex_lock(&block_mutex);
	if (md) {
		if (md->usage == 2)
			check_disk_change(bdev);
		ret = 0;

		if ((mode & FMODE_WRITE) && md->read_only) {
			mmc_blk_put(md);
			ret = -EROFS;
		}
	}
	mutex_unlock(&block_mutex);

	return ret;
}

static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
	struct mmc_blk_data *md = disk->private_data;

	mutex_lock(&block_mutex);
	mmc_blk_put(md);
	mutex_unlock(&block_mutex);
}

static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
}

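/*
 * The geometry reported above is fabricated: MMC/SD media has no real
 * CHS layout, so a fixed 4 heads x 16 sectors per track is reported and
 * cylinders are derived from the capacity. E.g. a card with 15,523,840
 * 512-byte sectors reports 15523840 / (4 * 16) = 242560 cylinders.
 */
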
struct mmc_blk_ioc_data {
	struct mmc_ioc_cmd ic;
	unsigned char *buf;
	u64 buf_bytes;
	struct mmc_rpmb_data *rpmb;
};

static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
	struct mmc_ioc_cmd __user *user)
{
	struct mmc_blk_ioc_data *idata;
	int err;

	idata = kmalloc(sizeof(*idata), GFP_KERNEL);
	if (!idata) {
		err = -ENOMEM;
		goto out;
	}

	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
		err = -EFAULT;
		goto idata_err;
	}

	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
		err = -EOVERFLOW;
		goto idata_err;
	}

	if (!idata->buf_bytes) {
		idata->buf = NULL;
		return idata;
	}

	idata->buf = kmalloc(idata->buf_bytes, GFP_KERNEL);
	if (!idata->buf) {
		err = -ENOMEM;
		goto idata_err;
	}

	if (copy_from_user(idata->buf, (void __user *)(unsigned long)
			   idata->ic.data_ptr, idata->buf_bytes)) {
		err = -EFAULT;
		goto copy_err;
	}

	return idata;

copy_err:
	kfree(idata->buf);
idata_err:
	kfree(idata);
out:
	return ERR_PTR(err);
}

static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
				      struct mmc_blk_ioc_data *idata)
{
	struct mmc_ioc_cmd *ic = &idata->ic;

	if (copy_to_user(&(ic_ptr->response), ic->response,
			 sizeof(ic->response)))
		return -EFAULT;

	if (!idata->ic.write_flag) {
		if (copy_to_user((void __user *)(unsigned long)ic->data_ptr,
				 idata->buf, idata->buf_bytes))
			return -EFAULT;
	}

	return 0;
}

static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
				       u32 retries_max)
{
	int err;
	u32 retry_count = 0;

	if (!status || !retries_max)
		return -EINVAL;

	do {
		err = __mmc_send_status(card, status, 5);
		if (err)
			break;

		if (!R1_STATUS(*status) &&
		    (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
			break; /* RPMB programming operation complete */

		/*
		 * Reschedule to give the MMC device a chance to continue
		 * processing the previous command without being polled too
		 * frequently.
		 */
		usleep_range(1000, 5000);
	} while (++retry_count < retries_max);

	if (retry_count == retries_max)
		err = -EPERM;

	return err;
}

static int ioctl_do_sanitize(struct mmc_card *card)
{
	int err;

	if (!mmc_can_sanitize(card)) {
		pr_warn("%s: %s - SANITIZE is not supported\n",
			mmc_hostname(card->host), __func__);
		err = -EOPNOTSUPP;
		goto out;
	}

	pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
		 mmc_hostname(card->host), __func__);

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_SANITIZE_START, 1,
			 MMC_SANITIZE_REQ_TIMEOUT);

	if (err)
		pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
		       mmc_hostname(card->host), __func__, err);

	pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
		 __func__);
out:
	return err;
}

static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
			       struct mmc_blk_ioc_data *idata)
{
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct mmc_request mrq = {};
	struct scatterlist sg;
	int err;
	unsigned int target_part;
	u32 status = 0;

	if (!card || !md || !idata)
		return -EINVAL;

	/*
	 * RPMB accesses come in from the character device, so we
	 * need to target these explicitly. Otherwise we just target
	 * the partition type for the block device the ioctl() was
	 * issued on.
	 */
	if (idata->rpmb) {
		/* Support multiple RPMB partitions */
		target_part = idata->rpmb->part_index;
		target_part |= EXT_CSD_PART_CONFIG_ACC_RPMB;
	} else {
		target_part = md->part_type;
	}

	cmd.opcode = idata->ic.opcode;
	cmd.arg = idata->ic.arg;
	cmd.flags = idata->ic.flags;

	if (idata->buf_bytes) {
		data.sg = &sg;
		data.sg_len = 1;
		data.blksz = idata->ic.blksz;
		data.blocks = idata->ic.blocks;

		sg_init_one(data.sg, idata->buf, idata->buf_bytes);

		if (idata->ic.write_flag)
			data.flags = MMC_DATA_WRITE;
		else
			data.flags = MMC_DATA_READ;

		/* data.flags must already be set before doing this. */
		mmc_set_data_timeout(&data, card);

		/* Allow overriding the timeout_ns for empirical tuning. */
		if (idata->ic.data_timeout_ns)
			data.timeout_ns = idata->ic.data_timeout_ns;

		if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
			/*
			 * Pretend this is a data transfer and rely on the
			 * host driver to compute timeout.  When all host
			 * drivers support cmd.cmd_timeout for R1B, this
			 * can be changed to:
			 *
			 *     mrq.data = NULL;
			 *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
			 */
			data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
		}

		mrq.data = &data;
	}

	mrq.cmd = &cmd;

	err = mmc_blk_part_switch(card, target_part);
	if (err)
		return err;

	if (idata->ic.is_acmd) {
		err = mmc_app_cmd(card->host, card);
		if (err)
			return err;
	}

	if (idata->rpmb) {
		err = mmc_set_blockcount(card, data.blocks,
			idata->ic.write_flag & (1 << 31));
		if (err)
			return err;
	}

	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
	    (cmd.opcode == MMC_SWITCH)) {
		err = ioctl_do_sanitize(card);

		if (err)
			pr_err("%s: ioctl_do_sanitize() failed. err = %d",
			       __func__, err);

		return err;
	}

	mmc_wait_for_req(card->host, &mrq);

	if (cmd.error) {
		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
			__func__, cmd.error);
		return cmd.error;
	}
	if (data.error) {
		dev_err(mmc_dev(card->host), "%s: data error %d\n",
			__func__, data.error);
		return data.error;
	}

	/*
	 * Make sure the cache of the PARTITION_CONFIG register and
	 * PARTITION_ACCESS bits is updated in case the ioctl ext_csd write
	 * changed it successfully.
	 */
	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_PART_CONFIG) &&
	    (cmd.opcode == MMC_SWITCH)) {
		struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
		u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg);

		/*
		 * Update cache so the next mmc_blk_part_switch call operates
		 * on up-to-date data.
		 */
		card->ext_csd.part_config = value;
		main_md->part_curr = value & EXT_CSD_PART_CONFIG_ACC_MASK;
	}

	/*
	 * According to the SD specs, some commands require a delay after
	 * issuing the command.
	 */
	if (idata->ic.postsleep_min_us)
		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);

	memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));

	if (idata->rpmb) {
		/*
		 * Ensure RPMB command has completed by polling CMD13
		 * "Send Status".
		 */
		err = ioctl_rpmb_card_status_poll(card, &status, 5);
		if (err)
			dev_err(mmc_dev(card->host),
				"%s: Card Status=0x%08X, error %d\n",
				__func__, status, err);
	}

	return err;
}

static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
			     struct mmc_ioc_cmd __user *ic_ptr,
			     struct mmc_rpmb_data *rpmb)
{
	struct mmc_blk_ioc_data *idata;
	struct mmc_blk_ioc_data *idatas[1];
	struct mmc_queue *mq;
	struct mmc_card *card;
	int err = 0, ioc_err = 0;
	struct request *req;

	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
	if (IS_ERR(idata))
		return PTR_ERR(idata);
	/* This will be NULL on non-RPMB ioctl():s */
	idata->rpmb = rpmb;

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}

	/*
	 * Dispatch the ioctl() into the block request queue.
	 */
	mq = &md->queue;
	req = blk_get_request(mq->queue,
		idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
		__GFP_RECLAIM);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto cmd_done;
	}
	idatas[0] = idata;
	req_to_mmc_queue_req(req)->drv_op =
		rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
	req_to_mmc_queue_req(req)->drv_op_data = idatas;
	req_to_mmc_queue_req(req)->ioc_count = 1;
	blk_execute_rq(mq->queue, NULL, req, 0);
	ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
	err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
	blk_put_request(req);

cmd_done:
	kfree(idata->buf);
	kfree(idata);
	return ioc_err ? ioc_err : err;
}

static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
				   struct mmc_ioc_multi_cmd __user *user,
				   struct mmc_rpmb_data *rpmb)
{
	struct mmc_blk_ioc_data **idata = NULL;
	struct mmc_ioc_cmd __user *cmds = user->cmds;
	struct mmc_card *card;
	struct mmc_queue *mq;
	int i, err = 0, ioc_err = 0;
	__u64 num_of_cmds;
	struct request *req;

	if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
			   sizeof(num_of_cmds)))
		return -EFAULT;

	if (!num_of_cmds)
		return 0;

	if (num_of_cmds > MMC_IOC_MAX_CMDS)
		return -EINVAL;

	idata = kcalloc(num_of_cmds, sizeof(*idata), GFP_KERNEL);
	if (!idata)
		return -ENOMEM;

	for (i = 0; i < num_of_cmds; i++) {
		idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]);
		if (IS_ERR(idata[i])) {
			err = PTR_ERR(idata[i]);
			num_of_cmds = i;
			goto cmd_err;
		}
		/* This will be NULL on non-RPMB ioctl():s */
		idata[i]->rpmb = rpmb;
	}

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_err;
	}

	/*
	 * Dispatch the ioctl()s into the block request queue.
	 */
	mq = &md->queue;
	req = blk_get_request(mq->queue,
		idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
		__GFP_RECLAIM);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto cmd_err;
	}
	req_to_mmc_queue_req(req)->drv_op =
		rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
	req_to_mmc_queue_req(req)->drv_op_data = idata;
	req_to_mmc_queue_req(req)->ioc_count = num_of_cmds;
	blk_execute_rq(mq->queue, NULL, req, 0);
	ioc_err = req_to_mmc_queue_req(req)->drv_op_result;

	/* copy to user if data and response */
	for (i = 0; i < num_of_cmds && !err; i++)
		err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);

	blk_put_request(req);

cmd_err:
	for (i = 0; i < num_of_cmds; i++) {
		kfree(idata[i]->buf);
		kfree(idata[i]);
	}
	kfree(idata);
	return ioc_err ? ioc_err : err;
}

static int mmc_blk_check_blkdev(struct block_device *bdev)
{
	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition.  This prevents overspray
	 * between sibling partitions.
	 */
	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
		return -EPERM;
	return 0;
}

static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	struct mmc_blk_data *md;
	int ret;

	switch (cmd) {
	case MMC_IOC_CMD:
		ret = mmc_blk_check_blkdev(bdev);
		if (ret)
			return ret;
		md = mmc_blk_get(bdev->bd_disk);
		if (!md)
			return -EINVAL;
		ret = mmc_blk_ioctl_cmd(md,
					(struct mmc_ioc_cmd __user *)arg,
					NULL);
		mmc_blk_put(md);
		return ret;
	case MMC_IOC_MULTI_CMD:
		ret = mmc_blk_check_blkdev(bdev);
		if (ret)
			return ret;
		md = mmc_blk_get(bdev->bd_disk);
		if (!md)
			return -EINVAL;
		ret = mmc_blk_ioctl_multi_cmd(md,
					(struct mmc_ioc_multi_cmd __user *)arg,
					NULL);
		mmc_blk_put(md);
		return ret;
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
#endif

static const struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
	.ioctl			= mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= mmc_blk_compat_ioctl,
#endif
};

static int mmc_blk_part_switch_pre(struct mmc_card *card,
				   unsigned int part_type)
{
	int ret = 0;

	if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
		if (card->ext_csd.cmdq_en) {
			ret = mmc_cmdq_disable(card);
			if (ret)
				return ret;
		}
		mmc_retune_pause(card->host);
	}

	return ret;
}

static int mmc_blk_part_switch_post(struct mmc_card *card,
				    unsigned int part_type)
{
	int ret = 0;

	if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
		mmc_retune_unpause(card->host);
		if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
			ret = mmc_cmdq_enable(card);
	}

	return ret;
}

static inline int mmc_blk_part_switch(struct mmc_card *card,
				      unsigned int part_type)
{
	int ret = 0;
	struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);

	if (main_md->part_curr == part_type)
		return 0;

	if (mmc_card_mmc(card)) {
		u8 part_config = card->ext_csd.part_config;

		ret = mmc_blk_part_switch_pre(card, part_type);
		if (ret)
			return ret;

		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		part_config |= part_type;

		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONFIG, part_config,
				 card->ext_csd.part_time);
		if (ret) {
			mmc_blk_part_switch_post(card, part_type);
			return ret;
		}

		card->ext_csd.part_config = part_config;

		ret = mmc_blk_part_switch_post(card, main_md->part_curr);
	}

	main_md->part_curr = part_type;
	return ret;
}

static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
{
	int err;
	u32 result;
	__be32 *blocks;

	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};

	struct scatterlist sg;

	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		return err;
	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
		return -EIO;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);

	mrq.cmd = &cmd;
	mrq.data = &data;

	blocks = kmalloc(4, GFP_KERNEL);
	if (!blocks)
		return -ENOMEM;

	sg_init_one(&sg, blocks, 4);

	mmc_wait_for_req(card->host, &mrq);

	result = ntohl(*blocks);
	kfree(blocks);

	if (cmd.error || data.error)
		return -EIO;

	*written_blocks = result;

	return 0;
}

static unsigned int mmc_blk_clock_khz(struct mmc_host *host)
{
	if (host->actual_clock)
		return host->actual_clock / 1000;

	/* Clock may be subject to a divisor, fudge it by a factor of 2. */
	if (host->ios.clock)
		return host->ios.clock / 2000;

	/* How can there be no clock? */
	WARN_ON_ONCE(1);
	return 100; /* 100 kHz is the minimum possible value */
}

static unsigned int mmc_blk_data_timeout_ms(struct mmc_host *host,
					    struct mmc_data *data)
{
	unsigned int ms = DIV_ROUND_UP(data->timeout_ns, 1000000);
	unsigned int khz;

	if (data->timeout_clks) {
		khz = mmc_blk_clock_khz(host);
		ms += DIV_ROUND_UP(data->timeout_clks, khz);
	}

	return ms;
}

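/*
 * Worked example with illustrative values: data->timeout_ns = 80000000
 * (80 ms) and data->timeout_clks = 100000 on a host clocked at 50 MHz
 * (50000 kHz) yields 80 + DIV_ROUND_UP(100000, 50000) = 82 ms.
 */
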
static inline bool mmc_blk_in_tran_state(u32 status)
{
	/*
	 * Some cards mishandle the status bits, so make sure to check both the
	 * busy indication and the card state.
	 */
	return status & R1_READY_FOR_DATA &&
	       (R1_CURRENT_STATE(status) == R1_STATE_TRAN);
}

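/*
 * E.g. an R1 status of 0x00000900 (READY_FOR_DATA set, current state
 * 4 == tran) passes this test, while 0x00000E00 (state 7 == prg, not
 * ready for data) keeps callers such as card_busy_detect() polling.
 */
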
static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
			    struct request *req, u32 *resp_errs)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
	int err = 0;
	u32 status;

	do {
		bool done = time_after(jiffies, timeout);

		err = __mmc_send_status(card, &status, 5);
		if (err) {
			pr_err("%s: error %d requesting status\n",
			       req->rq_disk->disk_name, err);
			return err;
		}

		/* Accumulate any response error bits seen */
		if (resp_errs)
			*resp_errs |= status;

		/*
		 * Timeout if the device never becomes ready for data and never
		 * leaves the program state.
		 */
		if (done) {
			pr_err("%s: Card stuck in wrong state! %s %s status: %#x\n",
			       mmc_hostname(card->host),
			       req->rq_disk->disk_name, __func__, status);
			return -ETIMEDOUT;
		}

		/*
		 * Some cards mishandle the status bits,
		 * so make sure to check both the busy
		 * indication and the card state.
		 */
	} while (!mmc_blk_in_tran_state(status));

	return err;
}

static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
			 int type)
{
	int err;

	if (md->reset_done & type)
		return -EEXIST;

	md->reset_done |= type;
	err = mmc_hw_reset(host);
	/* Ensure we switch back to the correct partition */
	if (err != -EOPNOTSUPP) {
		struct mmc_blk_data *main_md =
			dev_get_drvdata(&host->card->dev);
		int part_err;

		main_md->part_curr = main_md->part_type;
		part_err = mmc_blk_part_switch(host->card, md->part_type);
		if (part_err) {
			/*
			 * We have failed to get back into the correct
			 * partition, so we need to abort the whole request.
			 */
			return -ENODEV;
		}
	}
	return err;
}

static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
{
	md->reset_done &= ~type;
}

1064 | /* |
1065 | * The non-block commands come back from the block layer after it queued it and | |
1066 | * processed it with all other requests and then they get issued in this | |
1067 | * function. | |
1068 | */ | |
1069 | static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req) | |
1070 | { | |
1071 | struct mmc_queue_req *mq_rq; | |
1072 | struct mmc_card *card = mq->card; | |
1073 | struct mmc_blk_data *md = mq->blkdata; | |
69f7599e | 1074 | struct mmc_blk_ioc_data **idata; |
97548575 | 1075 | bool rpmb_ioctl; |
627c3ccf LW |
1076 | u8 **ext_csd; |
1077 | u32 status; | |
0493f6fe | 1078 | int ret; |
5ec12396 LW |
1079 | int i; |
1080 | ||
1081 | mq_rq = req_to_mmc_queue_req(req); | |
97548575 | 1082 | rpmb_ioctl = (mq_rq->drv_op == MMC_DRV_OP_IOCTL_RPMB); |
5ec12396 LW |
1083 | |
1084 | switch (mq_rq->drv_op) { | |
1085 | case MMC_DRV_OP_IOCTL: | |
97548575 | 1086 | case MMC_DRV_OP_IOCTL_RPMB: |
69f7599e | 1087 | idata = mq_rq->drv_op_data; |
7432b49b | 1088 | for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) { |
69f7599e | 1089 | ret = __mmc_blk_ioctl_cmd(card, md, idata[i]); |
0493f6fe | 1090 | if (ret) |
5ec12396 LW |
1091 | break; |
1092 | } | |
5ec12396 | 1093 | /* Always switch back to main area after RPMB access */ |
97548575 LW |
1094 | if (rpmb_ioctl) |
1095 | mmc_blk_part_switch(card, 0); | |
0493f6fe LW |
1096 | break; |
1097 | case MMC_DRV_OP_BOOT_WP: | |
1098 | ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, | |
1099 | card->ext_csd.boot_ro_lock | | |
1100 | EXT_CSD_BOOT_WP_B_PWR_WP_EN, | |
1101 | card->ext_csd.part_time); | |
1102 | if (ret) | |
1103 | pr_err("%s: Locking boot partition ro until next power on failed: %d\n", | |
1104 | md->disk->disk_name, ret); | |
1105 | else | |
1106 | card->ext_csd.boot_ro_lock |= | |
1107 | EXT_CSD_BOOT_WP_B_PWR_WP_EN; | |
5ec12396 | 1108 | break; |
627c3ccf LW |
1109 | case MMC_DRV_OP_GET_CARD_STATUS: |
1110 | ret = mmc_send_status(card, &status); | |
1111 | if (!ret) | |
1112 | ret = status; | |
1113 | break; | |
1114 | case MMC_DRV_OP_GET_EXT_CSD: | |
1115 | ext_csd = mq_rq->drv_op_data; | |
1116 | ret = mmc_get_ext_csd(card, ext_csd); | |
1117 | break; | |
5ec12396 | 1118 | default: |
0493f6fe LW |
1119 | pr_err("%s: unknown driver specific operation\n", |
1120 | md->disk->disk_name); | |
1121 | ret = -EINVAL; | |
5ec12396 LW |
1122 | break; |
1123 | } | |
0493f6fe | 1124 | mq_rq->drv_op_result = ret; |
0fbfd125 | 1125 | blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK); |
5ec12396 LW |
1126 | } |
1127 | ||
static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0, type = MMC_BLK_DISCARD;
	blk_status_t status = BLK_STS_OK;

	if (!mmc_can_erase(card)) {
		status = BLK_STS_NOTSUPP;
		goto fail;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_discard(card))
		arg = MMC_DISCARD_ARG;
	else if (mmc_can_trim(card))
		arg = MMC_TRIM_ARG;
	else
		arg = MMC_ERASE_ARG;
	do {
		err = 0;
		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 INAND_CMD38_ARG_EXT_CSD,
					 arg == MMC_TRIM_ARG ?
					 INAND_CMD38_ARG_TRIM :
					 INAND_CMD38_ARG_ERASE,
					 0);
		}
		if (!err)
			err = mmc_erase(card, from, nr, arg);
	} while (err == -EIO && !mmc_blk_reset(md, card->host, type));
	if (err)
		status = BLK_STS_IOERR;
	else
		mmc_blk_reset_success(md, type);
fail:
	blk_mq_end_request(req, status);
}

static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
					struct request *req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0, type = MMC_BLK_SECDISCARD;
	blk_status_t status = BLK_STS_OK;

	if (!(mmc_can_secure_erase_trim(card))) {
		status = BLK_STS_NOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
		arg = MMC_SECURE_TRIM1_ARG;
	else
		arg = MMC_SECURE_ERASE_ARG;

retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_SECURE_TRIM1_ARG ?
				 INAND_CMD38_ARG_SECTRIM1 :
				 INAND_CMD38_ARG_SECERASE,
				 0);
		if (err)
			goto out_retry;
	}

	err = mmc_erase(card, from, nr, arg);
	if (err == -EIO)
		goto out_retry;
	if (err) {
		status = BLK_STS_IOERR;
		goto out;
	}

	if (arg == MMC_SECURE_TRIM1_ARG) {
		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 INAND_CMD38_ARG_EXT_CSD,
					 INAND_CMD38_ARG_SECTRIM2,
					 0);
			if (err)
				goto out_retry;
		}

		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
		if (err == -EIO)
			goto out_retry;
		if (err) {
			status = BLK_STS_IOERR;
			goto out;
		}
	}

out_retry:
	if (err && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
out:
	blk_mq_end_request(req, status);
}

static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	int ret = 0;

	ret = mmc_flush_cache(card);
	blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}

/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
				    struct mmc_card *card,
				    struct request *req)
{
	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
		/* Legacy mode imposes restrictions on transfers. */
		if (!IS_ALIGNED(blk_rq_pos(req), card->ext_csd.rel_sectors))
			brq->data.blocks = 1;

		if (brq->data.blocks > card->ext_csd.rel_sectors)
			brq->data.blocks = card->ext_csd.rel_sectors;
		else if (brq->data.blocks < card->ext_csd.rel_sectors)
			brq->data.blocks = 1;
	}
}

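/*
 * E.g. with card->ext_csd.rel_sectors == 8 in legacy mode: an aligned
 * 13-sector write is clamped to 8 sectors, and a 5-sector (or unaligned)
 * write is reduced to a single sector, so each transfer fits within one
 * guaranteed-atomic reliable write.
 */
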
#define CMD_ERRORS_EXCL_OOR						\
	(R1_ADDRESS_ERROR |	/* Misaligned address */		\
	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
	 R1_CARD_ECC_FAILED |	/* Card ECC failed */			\
	 R1_CC_ERROR |		/* Card controller error */		\
	 R1_ERROR)		/* General/unknown error */

#define CMD_ERRORS							\
	(CMD_ERRORS_EXCL_OOR |						\
	 R1_OUT_OF_RANGE)	/* Command argument out of range */

static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
{
	u32 val;

	/*
	 * Per the SD specification (physical layer version 4.10) [1],
	 * section 4.3.3, it explicitly states that "When the last
	 * block of user area is read using CMD18, the host should
	 * ignore OUT_OF_RANGE error that may occur even the sequence
	 * is correct". And JESD84-B51 for eMMC also has a similar
	 * statement on section 6.8.3.
	 *
	 * Multiple block read/write could be done by either predefined
	 * method, namely CMD23, or open-ending mode. For open-ending mode,
	 * we should ignore the OUT_OF_RANGE error as it's normal behaviour.
	 *
	 * However the spec [1] doesn't tell us whether we should also
	 * ignore that for predefined method. But per the spec [1], section
	 * 4.15 Set Block Count Command, it says "If illegal block count
	 * is set, out of range error will be indicated during read/write
	 * operation (For example, data transfer is stopped at user area
	 * boundary)." In other words, we can expect an out-of-range error
	 * in the response for the following CMD18/25. And if the argument
	 * of CMD23 + the argument of CMD18/25 exceed the max number of
	 * blocks, we can also expect to get a -ETIMEDOUT or any other error
	 * number from the host drivers due to a missing data response (for
	 * write) or data (for read), as the cards will stop the data
	 * transfer by themselves per the spec. So we only need to check
	 * R1_OUT_OF_RANGE for open-ending mode.
	 */

	if (!brq->stop.error) {
		bool oor_with_open_end;
		/* If there is no error yet, check R1 response */

		val = brq->stop.resp[0] & CMD_ERRORS;
		oor_with_open_end = val & R1_OUT_OF_RANGE && !brq->mrq.sbc;

		if (val && !oor_with_open_end)
			brq->stop.error = -EIO;
	}
}

static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
			      int disable_multi, bool *do_rel_wr_p,
			      bool *do_data_tag_p)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mmc_queue_req_to_req(mqrq);
	bool do_rel_wr, do_data_tag;

	/*
	 * Reliable writes are used to implement Forced Unit Access and
	 * are supported only on MMCs.
	 */
	do_rel_wr = (req->cmd_flags & REQ_FUA) &&
		    rq_data_dir(req) == WRITE &&
		    (md->flags & MMC_BLK_REL_WR);

	memset(brq, 0, sizeof(struct mmc_blk_request));

	brq->mrq.data = &brq->data;
	brq->mrq.tag = req->tag;

	brq->stop.opcode = MMC_STOP_TRANSMISSION;
	brq->stop.arg = 0;

	if (rq_data_dir(req) == READ) {
		brq->data.flags = MMC_DATA_READ;
		brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		brq->data.flags = MMC_DATA_WRITE;
		brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	}

	brq->data.blksz = 512;
	brq->data.blocks = blk_rq_sectors(req);
	brq->data.blk_addr = blk_rq_pos(req);

	/*
	 * The command queue supports 2 priorities: "high" (1) and "simple" (0).
	 * The eMMC will give "high" priority tasks priority over "simple"
	 * priority tasks. Here we always set "simple" priority by not setting
	 * MMC_DATA_PRIO.
	 */

	/*
	 * The block layer doesn't support all sector count
	 * restrictions, so we need to be prepared for too big
	 * requests.
	 */
	if (brq->data.blocks > card->host->max_blk_count)
		brq->data.blocks = card->host->max_blk_count;

	if (brq->data.blocks > 1) {
		/*
		 * After a read error, we redo the request one sector
		 * at a time in order to accurately determine which
		 * sectors can be read successfully.
		 */
		if (disable_multi)
			brq->data.blocks = 1;

		/*
		 * Some controllers have HW issues while operating
		 * in multiple I/O mode
		 */
		if (card->host->ops->multi_io_quirk)
			brq->data.blocks = card->host->ops->multi_io_quirk(card,
						(rq_data_dir(req) == READ) ?
						MMC_DATA_READ : MMC_DATA_WRITE,
						brq->data.blocks);
	}

	if (do_rel_wr) {
		mmc_apply_rel_rw(brq, card, req);
		brq->data.flags |= MMC_DATA_REL_WR;
	}

	/*
	 * Data tag is used only during writing meta data to speed
	 * up write and any subsequent read of this meta data
	 */
	do_data_tag = card->ext_csd.data_tag_unit_size &&
		      (req->cmd_flags & REQ_META) &&
		      (rq_data_dir(req) == WRITE) &&
		      ((brq->data.blocks * brq->data.blksz) >=
		       card->ext_csd.data_tag_unit_size);

	if (do_data_tag)
		brq->data.flags |= MMC_DATA_DAT_TAG;

	mmc_set_data_timeout(&brq->data, card);

	brq->data.sg = mqrq->sg;
	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

	/*
	 * Adjust the sg list so it is the same size as the
	 * request.
	 */
	if (brq->data.blocks != blk_rq_sectors(req)) {
		int i, data_size = brq->data.blocks << 9;
		struct scatterlist *sg;

		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
			data_size -= sg->length;
			if (data_size <= 0) {
				sg->length += data_size;
				i++;
				break;
			}
		}
		brq->data.sg_len = i;
	}

	if (do_rel_wr_p)
		*do_rel_wr_p = do_rel_wr;

	if (do_data_tag_p)
		*do_data_tag_p = do_data_tag;
}

1e8e55b6 AH |
1450 | #define MMC_CQE_RETRIES 2 |
1451 | ||
1452 | static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req) | |
1453 | { | |
1454 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
1455 | struct mmc_request *mrq = &mqrq->brq.mrq; | |
1456 | struct request_queue *q = req->q; | |
1457 | struct mmc_host *host = mq->card->host; | |
1458 | unsigned long flags; | |
1459 | bool put_card; | |
1460 | int err; | |
1461 | ||
1462 | mmc_cqe_post_req(host, mrq); | |
1463 | ||
1464 | if (mrq->cmd && mrq->cmd->error) | |
1465 | err = mrq->cmd->error; | |
1466 | else if (mrq->data && mrq->data->error) | |
1467 | err = mrq->data->error; | |
1468 | else | |
1469 | err = 0; | |
1470 | ||
1471 | if (err) { | |
1472 | if (mqrq->retries++ < MMC_CQE_RETRIES) | |
1473 | blk_mq_requeue_request(req, true); | |
1474 | else | |
1475 | blk_mq_end_request(req, BLK_STS_IOERR); | |
1476 | } else if (mrq->data) { | |
1477 | if (blk_update_request(req, BLK_STS_OK, mrq->data->bytes_xfered)) | |
1478 | blk_mq_requeue_request(req, true); | |
1479 | else | |
1480 | __blk_mq_end_request(req, BLK_STS_OK); | |
1481 | } else { | |
1482 | blk_mq_end_request(req, BLK_STS_OK); | |
1483 | } | |
1484 | ||
1485 | spin_lock_irqsave(q->queue_lock, flags); | |
1486 | ||
1487 | mq->in_flight[mmc_issue_type(mq, req)] -= 1; | |
1488 | ||
1489 | put_card = (mmc_tot_in_flight(mq) == 0); | |
1490 | ||
1491 | mmc_cqe_check_busy(mq); | |
1492 | ||
1493 | spin_unlock_irqrestore(q->queue_lock, flags); | |
1494 | ||
1495 | if (!mq->cqe_busy) | |
1496 | blk_mq_run_hw_queues(q, true); | |
1497 | ||
1498 | if (put_card) | |
1499 | mmc_put_card(mq->card, &mq->ctx); | |
1500 | } | |
1501 | ||
1502 | void mmc_blk_cqe_recovery(struct mmc_queue *mq) | |
1503 | { | |
1504 | struct mmc_card *card = mq->card; | |
1505 | struct mmc_host *host = card->host; | |
1506 | int err; | |
1507 | ||
1508 | pr_debug("%s: CQE recovery start\n", mmc_hostname(host)); | |
1509 | ||
1510 | err = mmc_cqe_recovery(host); | |
1511 | if (err) | |
1512 | mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY); | |
1513 | else | |
1514 | mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY); | |
1515 | ||
1516 | pr_debug("%s: CQE recovery done\n", mmc_hostname(host)); | |
1517 | } | |
1518 | ||
1519 | static void mmc_blk_cqe_req_done(struct mmc_request *mrq) | |
1520 | { | |
1521 | struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req, | |
1522 | brq.mrq); | |
1523 | struct request *req = mmc_queue_req_to_req(mqrq); | |
1524 | struct request_queue *q = req->q; | |
1525 | struct mmc_queue *mq = q->queuedata; | |
1526 | ||
1527 | /* | |
1528 | * Block layer timeouts race with completions which means the normal | |
1529 | * completion path cannot be used during recovery. | |
1530 | */ | |
1531 | if (mq->in_recovery) | |
1532 | mmc_blk_cqe_complete_rq(mq, req); | |
1533 | else | |
1534 | blk_mq_complete_request(req); | |
1535 | } | |
1536 | ||
1537 | static int mmc_blk_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq) | |
1538 | { | |
1539 | mrq->done = mmc_blk_cqe_req_done; | |
1540 | mrq->recovery_notifier = mmc_cqe_recovery_notifier; | |
1541 | ||
1542 | return mmc_cqe_start_req(host, mrq); | |
1543 | } | |
1544 | ||
1545 | static struct mmc_request *mmc_blk_cqe_prep_dcmd(struct mmc_queue_req *mqrq, | |
1546 | struct request *req) | |
1547 | { | |
1548 | struct mmc_blk_request *brq = &mqrq->brq; | |
1549 | ||
1550 | memset(brq, 0, sizeof(*brq)); | |
1551 | ||
1552 | brq->mrq.cmd = &brq->cmd; | |
1553 | brq->mrq.tag = req->tag; | |
1554 | ||
1555 | return &brq->mrq; | |
1556 | } | |
1557 | ||
1558 | static int mmc_blk_cqe_issue_flush(struct mmc_queue *mq, struct request *req) | |
1559 | { | |
1560 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
1561 | struct mmc_request *mrq = mmc_blk_cqe_prep_dcmd(mqrq, req); | |
1562 | ||
1563 | mrq->cmd->opcode = MMC_SWITCH; | |
1564 | mrq->cmd->arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | | |
1565 | (EXT_CSD_FLUSH_CACHE << 16) | | |
1566 | (1 << 8) | | |
1567 | EXT_CSD_CMD_SET_NORMAL; | |
1568 | mrq->cmd->flags = MMC_CMD_AC | MMC_RSP_R1B; | |
1569 | ||
1570 | return mmc_blk_cqe_start_req(mq->card->host, mrq); | |
1571 | } | |
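/*
 * Illustrative sketch (editor's note, not driver code): the CMD6
 * (SWITCH) argument built above packs four byte-wide fields:
 *
 *	arg = (access_mode << 24) | (ext_csd_index << 16) |
 *	      (value << 8) | cmd_set;
 *
 * With the usual definitions (MMC_SWITCH_MODE_WRITE_BYTE == 0x03,
 * EXT_CSD_FLUSH_CACHE == 32, EXT_CSD_CMD_SET_NORMAL == 1) a cache
 * flush works out to arg == 0x03200101.
 */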
1572 | ||
1573 | static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req) | |
1574 | { | |
1575 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
1576 | ||
1577 | mmc_blk_data_prep(mq, mqrq, 0, NULL, NULL); | |
1578 | ||
1579 | return mmc_blk_cqe_start_req(mq->card->host, &mqrq->brq.mrq); | |
1580 | } | |
1581 | ||
ca5717f7 AH |
1582 | static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, |
1583 | struct mmc_card *card, | |
1584 | int disable_multi, | |
1585 | struct mmc_queue *mq) | |
1586 | { | |
1587 | u32 readcmd, writecmd; | |
1588 | struct mmc_blk_request *brq = &mqrq->brq; | |
67e69d52 | 1589 | struct request *req = mmc_queue_req_to_req(mqrq); |
ca5717f7 AH |
1590 | struct mmc_blk_data *md = mq->blkdata; |
1591 | bool do_rel_wr, do_data_tag; | |
1592 | ||
1593 | mmc_blk_data_prep(mq, mqrq, disable_multi, &do_rel_wr, &do_data_tag); | |
1594 | ||
1595 | brq->mrq.cmd = &brq->cmd; | |
1596 | ||
1597 | brq->cmd.arg = blk_rq_pos(req); | |
1598 | if (!mmc_card_blockaddr(card)) | |
1599 | brq->cmd.arg <<= 9; | |
1600 | brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; | |
1601 | ||
54d49d77 PF |
1602 | if (brq->data.blocks > 1 || do_rel_wr) { |
1603 | /* SPI multiblock writes terminate using a special | |
1604 | * token, not a STOP_TRANSMISSION request. | |
d0c97cfb | 1605 | */ |
54d49d77 PF |
1606 | if (!mmc_host_is_spi(card->host) || |
1607 | rq_data_dir(req) == READ) | |
1608 | brq->mrq.stop = &brq->stop; | |
1609 | readcmd = MMC_READ_MULTIPLE_BLOCK; | |
1610 | writecmd = MMC_WRITE_MULTIPLE_BLOCK; | |
1611 | } else { | |
1612 | brq->mrq.stop = NULL; | |
1613 | readcmd = MMC_READ_SINGLE_BLOCK; | |
1614 | writecmd = MMC_WRITE_BLOCK; | |
1615 | } | |
ca5717f7 | 1616 | brq->cmd.opcode = rq_data_dir(req) == READ ? readcmd : writecmd; |
4265900e | 1617 | |
54d49d77 PF |
1618 | /* |
1619 | * Pre-defined multi-block transfers are preferable to | |
1620 | * open-ended ones (and necessary for reliable writes). |
1621 | * However, it is not sufficient to just send CMD23, | |
1622 | * and avoid the final CMD12, as on an error condition | |
1623 | * CMD12 (stop) needs to be sent anyway. This, coupled | |
1624 | * with Auto-CMD23 enhancements provided by some | |
1625 | * hosts, means that the complexity of dealing | |
1626 | * with this is best left to the host. If CMD23 is | |
1627 | * supported by card and host, we'll fill sbc in and let | |
1628 | * the host deal with handling it correctly. This means | |
1629 | * that for hosts that don't expose MMC_CAP_CMD23, no | |
1630 | * change of behavior will be observed. | |
1631 | * | |
1632 | * N.B.: Some MMC cards experience performance degradation. |
1633 | * We'll avoid using CMD23-bounded multiblock writes for | |
1634 | * these, while retaining features like reliable writes. | |
1635 | */ | |
4265900e SD |
1636 | if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) && |
1637 | (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) || | |
1638 | do_data_tag)) { | |
54d49d77 PF |
1639 | brq->sbc.opcode = MMC_SET_BLOCK_COUNT; |
1640 | brq->sbc.arg = brq->data.blocks | | |
4265900e SD |
1641 | (do_rel_wr ? (1 << 31) : 0) | |
1642 | (do_data_tag ? (1 << 29) : 0); | |
54d49d77 PF |
1643 | brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; |
1644 | brq->mrq.sbc = &brq->sbc; | |
1645 | } | |
54d49d77 | 1646 | } |
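/*
 * Illustrative sketch (editor's note, not driver code): the CMD23
 * argument prepared above packs the block count into the low 16 bits
 * together with two flag bits. A reliable write of 8 blocks with no
 * data tag would be encoded as:
 *
 *	u32 sbc_arg = 8			// brq->data.blocks
 *		    | (1U << 31)	// reliable write request
 *		    | (0U << 29);	// data tag not requested
 */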
6a79e391 | 1647 | |
81196976 | 1648 | #define MMC_MAX_RETRIES 5 |
7eb43d53 | 1649 | #define MMC_DATA_RETRIES 2 |
81196976 AH |
1650 | #define MMC_NO_RETRIES (MMC_MAX_RETRIES + 1) |
1651 | ||
7eb43d53 AH |
1652 | static int mmc_blk_send_stop(struct mmc_card *card, unsigned int timeout) |
1653 | { | |
1654 | struct mmc_command cmd = { | |
1655 | .opcode = MMC_STOP_TRANSMISSION, | |
1656 | .flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC, | |
1657 | /* Some hosts wait for busy anyway, so provide a busy timeout */ | |
1658 | .busy_timeout = timeout, | |
1659 | }; | |
1660 | ||
1661 | return mmc_wait_for_cmd(card->host, &cmd, 5); | |
1662 | } | |
1663 | ||
1664 | static int mmc_blk_fix_state(struct mmc_card *card, struct request *req) | |
1665 | { | |
1666 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
1667 | struct mmc_blk_request *brq = &mqrq->brq; | |
1668 | unsigned int timeout = mmc_blk_data_timeout_ms(card->host, &brq->data); | |
1669 | int err; | |
1670 | ||
1671 | mmc_retune_hold_now(card->host); | |
1672 | ||
1673 | mmc_blk_send_stop(card, timeout); | |
1674 | ||
0fbfd125 | 1675 | err = card_busy_detect(card, timeout, req, NULL); |
7eb43d53 AH |
1676 | |
1677 | mmc_retune_release(card->host); | |
1678 | ||
1679 | return err; | |
1680 | } | |
1681 | ||
81196976 AH |
1682 | #define MMC_READ_SINGLE_RETRIES 2 |
1683 | ||
1684 | /* Single sector read during recovery */ | |
1685 | static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req) | |
1686 | { | |
1687 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
1688 | struct mmc_request *mrq = &mqrq->brq.mrq; | |
1689 | struct mmc_card *card = mq->card; | |
1690 | struct mmc_host *host = card->host; | |
1691 | blk_status_t error = BLK_STS_OK; | |
1692 | int retries = 0; | |
1693 | ||
1694 | do { | |
1695 | u32 status; | |
1696 | int err; | |
1697 | ||
1698 | mmc_blk_rw_rq_prep(mqrq, card, 1, mq); | |
1699 | ||
1700 | mmc_wait_for_req(host, mrq); | |
1701 | ||
1702 | err = mmc_send_status(card, &status); | |
1703 | if (err) | |
1704 | goto error_exit; | |
1705 | ||
1706 | if (!mmc_host_is_spi(host) && | |
7eb43d53 AH |
1707 | !mmc_blk_in_tran_state(status)) { |
1708 | err = mmc_blk_fix_state(card, req); | |
81196976 AH |
1709 | if (err) |
1710 | goto error_exit; | |
1711 | } | |
1712 | ||
1713 | if (mrq->cmd->error && retries++ < MMC_READ_SINGLE_RETRIES) | |
1714 | continue; | |
1715 | ||
1716 | retries = 0; | |
1717 | ||
1718 | if (mrq->cmd->error || | |
1719 | mrq->data->error || | |
1720 | (!mmc_host_is_spi(host) && | |
1721 | (mrq->cmd->resp[0] & CMD_ERRORS || status & CMD_ERRORS))) | |
1722 | error = BLK_STS_IOERR; | |
1723 | else | |
1724 | error = BLK_STS_OK; | |
1725 | ||
1726 | } while (blk_update_request(req, error, 512)); | |
1727 | ||
1728 | return; | |
1729 | ||
1730 | error_exit: | |
1731 | mrq->data->bytes_xfered = 0; | |
1732 | blk_update_request(req, BLK_STS_IOERR, 512); | |
1733 | /* Let it try the remaining request again */ | |
1734 | if (mqrq->retries > MMC_MAX_RETRIES - 1) | |
1735 | mqrq->retries = MMC_MAX_RETRIES - 1; | |
1736 | } | |
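/*
 * Editor's note (illustrative): passing disable_multi == 1 to
 * mmc_blk_rw_rq_prep() above caps each attempt at one 512-byte
 * sector, and blk_update_request(req, error, 512) then consumes the
 * request sector by sector, so an N-sector read degrades to at most
 * N single-sector transfers during recovery.
 */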
1737 | ||
7eb43d53 AH |
1738 | static inline bool mmc_blk_oor_valid(struct mmc_blk_request *brq) |
1739 | { | |
1740 | return !!brq->mrq.sbc; | |
1741 | } | |
1742 | ||
1743 | static inline u32 mmc_blk_stop_err_bits(struct mmc_blk_request *brq) | |
1744 | { | |
1745 | return mmc_blk_oor_valid(brq) ? CMD_ERRORS : CMD_ERRORS_EXCL_OOR; | |
1746 | } | |
1747 | ||
1748 | /* | |
1749 | * Check for errors the host controller driver might not have seen, such as |
1750 | * response mode errors or invalid card state. | |
1751 | */ | |
1752 | static bool mmc_blk_status_error(struct request *req, u32 status) | |
1753 | { | |
1754 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
1755 | struct mmc_blk_request *brq = &mqrq->brq; | |
1756 | struct mmc_queue *mq = req->q->queuedata; | |
1757 | u32 stop_err_bits; | |
1758 | ||
1759 | if (mmc_host_is_spi(mq->card->host)) | |
aa950144 | 1760 | return false; |
7eb43d53 AH |
1761 | |
1762 | stop_err_bits = mmc_blk_stop_err_bits(brq); | |
1763 | ||
1764 | return brq->cmd.resp[0] & CMD_ERRORS || | |
1765 | brq->stop.resp[0] & stop_err_bits || | |
1766 | status & stop_err_bits || | |
1767 | (rq_data_dir(req) == WRITE && !mmc_blk_in_tran_state(status)); | |
1768 | } | |
1769 | ||
1770 | static inline bool mmc_blk_cmd_started(struct mmc_blk_request *brq) | |
1771 | { | |
1772 | return !brq->sbc.error && !brq->cmd.error && | |
1773 | !(brq->cmd.resp[0] & CMD_ERRORS); | |
1774 | } | |
1775 | ||
1776 | /* | |
1777 | * Requests are completed by mmc_blk_mq_complete_rq() which sets a |
1778 | * simple policy: |
1779 | * 1. A request that has transferred at least some data is considered | |
1780 | * successful and will be requeued if there is remaining data to | |
1781 | * transfer. | |
1782 | * 2. Otherwise the number of retries is incremented and the request | |
1783 | * will be requeued if there are remaining retries. | |
1784 | * 3. Otherwise the request will be errored out. | |
1785 | * That means mmc_blk_mq_complete_rq() is controlled by bytes_xfered and | |
1786 | * mqrq->retries. So there are only 4 possible actions here: | |
1787 | * 1. do not accept the bytes_xfered value i.e. set it to zero | |
1788 | * 2. change mqrq->retries to determine the number of retries | |
1789 | * 3. try to reset the card | |
1790 | * 4. read one sector at a time | |
1791 | */ | |
81196976 AH |
1792 | static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req) |
1793 | { | |
1794 | int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; | |
1795 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
1796 | struct mmc_blk_request *brq = &mqrq->brq; | |
1797 | struct mmc_blk_data *md = mq->blkdata; | |
1798 | struct mmc_card *card = mq->card; | |
7eb43d53 AH |
1799 | u32 status; |
1800 | u32 blocks; | |
1801 | int err; | |
81196976 | 1802 | |
7eb43d53 AH |
1803 | /* |
1804 | * Some errors the host driver might not have seen. Set the number of | |
1805 | * bytes transferred to zero in that case. | |
1806 | */ | |
1807 | err = __mmc_send_status(card, &status, 0); | |
1808 | if (err || mmc_blk_status_error(req, status)) | |
1809 | brq->data.bytes_xfered = 0; | |
81196976 AH |
1810 | |
1811 | mmc_retune_release(card->host); | |
1812 | ||
1813 | /* | |
7eb43d53 AH |
1814 | * Try again to get the status. This also provides an opportunity for |
1815 | * re-tuning. | |
81196976 | 1816 | */ |
7eb43d53 AH |
1817 | if (err) |
1818 | err = __mmc_send_status(card, &status, 0); | |
81196976 | 1819 | |
7eb43d53 AH |
1820 | /* |
1821 | * If the card has been removed, there is nothing more to do once |
1822 | * the number of bytes transferred has been updated. |
1823 | */ | |
1824 | if (err && mmc_detect_card_removed(card->host)) | |
1825 | return; | |
81196976 | 1826 | |
7eb43d53 AH |
1827 | /* Try to get back to "tran" state */ |
1828 | if (!mmc_host_is_spi(mq->card->host) && | |
1829 | (err || !mmc_blk_in_tran_state(status))) | |
1830 | err = mmc_blk_fix_state(mq->card, req); | |
1831 | ||
1832 | /* | |
1833 | * Special case for SD cards where the card might record the number of | |
1834 | * blocks written. | |
1835 | */ | |
1836 | if (!err && mmc_blk_cmd_started(brq) && mmc_card_sd(card) && | |
1837 | rq_data_dir(req) == WRITE) { | |
1838 | if (mmc_sd_num_wr_blocks(card, &blocks)) | |
1839 | brq->data.bytes_xfered = 0; | |
1840 | else | |
1841 | brq->data.bytes_xfered = blocks << 9; | |
81196976 | 1842 | } |
7eb43d53 AH |
1843 | |
1844 | /* Reset if the card is in a bad state */ | |
1845 | if (!mmc_host_is_spi(mq->card->host) && | |
1846 | err && mmc_blk_reset(md, card->host, type)) { | |
1847 | pr_err("%s: recovery failed!\n", req->rq_disk->disk_name); | |
81196976 | 1848 | mqrq->retries = MMC_NO_RETRIES; |
7eb43d53 AH |
1849 | return; |
1850 | } | |
1851 | ||
1852 | /* | |
1853 | * If anything was done, just return and if there is anything remaining | |
1854 | * on the request it will get requeued. | |
1855 | */ | |
1856 | if (brq->data.bytes_xfered) | |
1857 | return; | |
1858 | ||
1859 | /* Reset before last retry */ | |
1860 | if (mqrq->retries + 1 == MMC_MAX_RETRIES) | |
1861 | mmc_blk_reset(md, card->host, type); | |
1862 | ||
1863 | /* Command errors fail fast, so use all MMC_MAX_RETRIES */ | |
1864 | if (brq->sbc.error || brq->cmd.error) | |
1865 | return; | |
1866 | ||
1867 | /* Reduce the remaining retries for data errors */ | |
1868 | if (mqrq->retries < MMC_MAX_RETRIES - MMC_DATA_RETRIES) { | |
1869 | mqrq->retries = MMC_MAX_RETRIES - MMC_DATA_RETRIES; | |
1870 | return; | |
1871 | } | |
1872 | ||
1873 | /* FIXME: Missing single sector read for large sector size */ | |
1874 | if (!mmc_large_sector(card) && rq_data_dir(req) == READ && | |
1875 | brq->data.blocks > 1) { | |
1876 | /* Read one sector at a time */ | |
1877 | mmc_blk_read_single(mq, req); | |
1878 | return; | |
81196976 AH |
1879 | } |
1880 | } | |
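/*
 * Worked example of the retry budget above (editor's note): with
 * MMC_MAX_RETRIES == 5 and MMC_DATA_RETRIES == 2, a pure command
 * error keeps the full budget of 5 retries, while a data error bumps
 * mqrq->retries to at least 3, leaving at most 2 further attempts;
 * the card is additionally reset just before the final attempt.
 */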
1881 | ||
10f21df4 AH |
1882 | static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq) |
1883 | { | |
1884 | mmc_blk_eval_resp_error(brq); | |
1885 | ||
1886 | return brq->sbc.error || brq->cmd.error || brq->stop.error || | |
1887 | brq->data.error || brq->cmd.resp[0] & CMD_ERRORS; | |
1888 | } | |
1889 | ||
88a51646 AH |
1890 | static int mmc_blk_card_busy(struct mmc_card *card, struct request *req) |
1891 | { | |
1892 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
f47a1fe3 | 1893 | u32 status = 0; |
88a51646 AH |
1894 | int err; |
1895 | ||
1896 | if (mmc_host_is_spi(card->host) || rq_data_dir(req) == READ) | |
1897 | return 0; | |
1898 | ||
0fbfd125 | 1899 | err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, req, &status); |
88a51646 | 1900 | |
f47a1fe3 AH |
1901 | /* |
1902 | * Do not assume data transferred correctly if there are any error bits | |
1903 | * set. | |
1904 | */ | |
1905 | if (status & mmc_blk_stop_err_bits(&mqrq->brq)) { | |
1906 | mqrq->brq.data.bytes_xfered = 0; | |
88a51646 AH |
1907 | err = err ? err : -EIO; |
1908 | } | |
1909 | ||
f47a1fe3 AH |
1910 | /* Copy the exception bit so it will be seen later on */ |
1911 | if (mmc_card_mmc(card) && status & R1_EXCEPTION_EVENT) | |
1912 | mqrq->brq.cmd.resp[0] |= R1_EXCEPTION_EVENT; | |
1913 | ||
88a51646 AH |
1914 | return err; |
1915 | } | |
1916 | ||
10f21df4 AH |
1917 | static inline void mmc_blk_rw_reset_success(struct mmc_queue *mq, |
1918 | struct request *req) | |
1919 | { | |
1920 | int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; | |
1921 | ||
1922 | mmc_blk_reset_success(mq->blkdata, type); | |
1923 | } | |
1924 | ||
81196976 AH |
1925 | static void mmc_blk_mq_complete_rq(struct mmc_queue *mq, struct request *req) |
1926 | { | |
1927 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
1928 | unsigned int nr_bytes = mqrq->brq.data.bytes_xfered; | |
1929 | ||
1930 | if (nr_bytes) { | |
1931 | if (blk_update_request(req, BLK_STS_OK, nr_bytes)) | |
1932 | blk_mq_requeue_request(req, true); | |
1933 | else | |
1934 | __blk_mq_end_request(req, BLK_STS_OK); | |
1935 | } else if (!blk_rq_bytes(req)) { | |
1936 | __blk_mq_end_request(req, BLK_STS_IOERR); | |
1937 | } else if (mqrq->retries++ < MMC_MAX_RETRIES) { | |
1938 | blk_mq_requeue_request(req, true); | |
1939 | } else { | |
1940 | if (mmc_card_removed(mq->card)) | |
1941 | req->rq_flags |= RQF_QUIET; | |
1942 | blk_mq_end_request(req, BLK_STS_IOERR); | |
1943 | } | |
1944 | } | |
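/*
 * Decision summary for mmc_blk_mq_complete_rq() above (editor's
 * note, illustrative):
 *
 *	bytes_xfered != 0, bytes remain	-> requeue the remainder
 *	bytes_xfered != 0, none remain	-> end request, BLK_STS_OK
 *	request carries no data		-> end request, BLK_STS_IOERR
 *	retries < MMC_MAX_RETRIES	-> requeue the whole request
 *	otherwise			-> end request, BLK_STS_IOERR
 */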
1945 | ||
1946 | static bool mmc_blk_urgent_bkops_needed(struct mmc_queue *mq, | |
1947 | struct mmc_queue_req *mqrq) | |
1948 | { | |
1949 | return mmc_card_mmc(mq->card) && !mmc_host_is_spi(mq->card->host) && | |
1950 | (mqrq->brq.cmd.resp[0] & R1_EXCEPTION_EVENT || | |
1951 | mqrq->brq.stop.resp[0] & R1_EXCEPTION_EVENT); | |
1952 | } | |
1953 | ||
1954 | static void mmc_blk_urgent_bkops(struct mmc_queue *mq, | |
1955 | struct mmc_queue_req *mqrq) | |
1956 | { | |
1957 | if (mmc_blk_urgent_bkops_needed(mq, mqrq)) | |
1958 | mmc_start_bkops(mq->card, true); | |
1959 | } | |
1960 | ||
1961 | void mmc_blk_mq_complete(struct request *req) | |
1962 | { | |
1963 | struct mmc_queue *mq = req->q->queuedata; | |
1964 | ||
1e8e55b6 AH |
1965 | if (mq->use_cqe) |
1966 | mmc_blk_cqe_complete_rq(mq, req); | |
1967 | else | |
1968 | mmc_blk_mq_complete_rq(mq, req); | |
81196976 AH |
1969 | } |
1970 | ||
1971 | static void mmc_blk_mq_poll_completion(struct mmc_queue *mq, | |
1972 | struct request *req) | |
1973 | { | |
1974 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
88a51646 | 1975 | struct mmc_host *host = mq->card->host; |
81196976 | 1976 | |
88a51646 AH |
1977 | if (mmc_blk_rq_error(&mqrq->brq) || |
1978 | mmc_blk_card_busy(mq->card, req)) { | |
1979 | mmc_blk_mq_rw_recovery(mq, req); | |
1980 | } else { | |
1981 | mmc_blk_rw_reset_success(mq, req); | |
1982 | mmc_retune_release(host); | |
1983 | } | |
81196976 AH |
1984 | |
1985 | mmc_blk_urgent_bkops(mq, mqrq); | |
1986 | } | |
1987 | ||
1988 | static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req) | |
1989 | { | |
1990 | struct request_queue *q = req->q; | |
1991 | unsigned long flags; | |
1992 | bool put_card; | |
1993 | ||
1994 | spin_lock_irqsave(q->queue_lock, flags); | |
1995 | ||
1996 | mq->in_flight[mmc_issue_type(mq, req)] -= 1; | |
1997 | ||
1998 | put_card = (mmc_tot_in_flight(mq) == 0); | |
1999 | ||
2000 | spin_unlock_irqrestore(q->queue_lock, flags); | |
2001 | ||
2002 | if (put_card) | |
2003 | mmc_put_card(mq->card, &mq->ctx); | |
2004 | } | |
2005 | ||
2006 | static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req) | |
2007 | { | |
2008 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
2009 | struct mmc_request *mrq = &mqrq->brq.mrq; | |
2010 | struct mmc_host *host = mq->card->host; | |
2011 | ||
2012 | mmc_post_req(host, mrq, 0); | |
2013 | ||
10f21df4 AH |
2014 | /* |
2015 | * Block layer timeouts race with completions which means the normal | |
2016 | * completion path cannot be used during recovery. | |
2017 | */ | |
2018 | if (mq->in_recovery) | |
2019 | mmc_blk_mq_complete_rq(mq, req); | |
2020 | else | |
2021 | blk_mq_complete_request(req); | |
81196976 AH |
2022 | |
2023 | mmc_blk_mq_dec_in_flight(mq, req); | |
2024 | } | |
2025 | ||
10f21df4 AH |
2026 | void mmc_blk_mq_recovery(struct mmc_queue *mq) |
2027 | { | |
2028 | struct request *req = mq->recovery_req; | |
2029 | struct mmc_host *host = mq->card->host; | |
2030 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
2031 | ||
2032 | mq->recovery_req = NULL; | |
2033 | mq->rw_wait = false; | |
2034 | ||
2035 | if (mmc_blk_rq_error(&mqrq->brq)) { | |
2036 | mmc_retune_hold_now(host); | |
2037 | mmc_blk_mq_rw_recovery(mq, req); | |
2038 | } | |
2039 | ||
2040 | mmc_blk_urgent_bkops(mq, mqrq); | |
2041 | ||
2042 | mmc_blk_mq_post_req(mq, req); | |
2043 | } | |
2044 | ||
81196976 AH |
2045 | static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq, |
2046 | struct request **prev_req) | |
2047 | { | |
10f21df4 AH |
2048 | if (mmc_host_done_complete(mq->card->host)) |
2049 | return; | |
2050 | ||
81196976 AH |
2051 | mutex_lock(&mq->complete_lock); |
2052 | ||
2053 | if (!mq->complete_req) | |
2054 | goto out_unlock; | |
2055 | ||
2056 | mmc_blk_mq_poll_completion(mq, mq->complete_req); | |
2057 | ||
2058 | if (prev_req) | |
2059 | *prev_req = mq->complete_req; | |
2060 | else | |
2061 | mmc_blk_mq_post_req(mq, mq->complete_req); | |
2062 | ||
2063 | mq->complete_req = NULL; | |
2064 | ||
2065 | out_unlock: | |
2066 | mutex_unlock(&mq->complete_lock); | |
2067 | } | |
2068 | ||
2069 | void mmc_blk_mq_complete_work(struct work_struct *work) | |
2070 | { | |
2071 | struct mmc_queue *mq = container_of(work, struct mmc_queue, | |
2072 | complete_work); | |
2073 | ||
2074 | mmc_blk_mq_complete_prev_req(mq, NULL); | |
2075 | } | |
2076 | ||
2077 | static void mmc_blk_mq_req_done(struct mmc_request *mrq) | |
2078 | { | |
2079 | struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req, | |
2080 | brq.mrq); | |
2081 | struct request *req = mmc_queue_req_to_req(mqrq); | |
2082 | struct request_queue *q = req->q; | |
2083 | struct mmc_queue *mq = q->queuedata; | |
10f21df4 | 2084 | struct mmc_host *host = mq->card->host; |
81196976 | 2085 | unsigned long flags; |
81196976 | 2086 | |
10f21df4 AH |
2087 | if (!mmc_host_done_complete(host)) { |
2088 | bool waiting; | |
81196976 | 2089 | |
10f21df4 AH |
2090 | /* |
2091 | * We cannot complete the request in this context, so record | |
2092 | * that there is a request to complete, and that a following | |
2093 | * request does not need to wait (although it does need to |
2094 | * complete 'complete_req' first). |
2095 | */ | |
2096 | spin_lock_irqsave(q->queue_lock, flags); | |
2097 | mq->complete_req = req; | |
2098 | mq->rw_wait = false; | |
2099 | waiting = mq->waiting; | |
2100 | spin_unlock_irqrestore(q->queue_lock, flags); | |
2101 | ||
2102 | /* | |
2103 | * If 'waiting' then the waiting task will complete this | |
2104 | * request, otherwise queue a work to do it. Note that | |
2105 | * complete_work may still race with the dispatch of a following | |
2106 | * request. | |
2107 | */ | |
2108 | if (waiting) | |
2109 | wake_up(&mq->wait); | |
2110 | else | |
2111 | kblockd_schedule_work(&mq->complete_work); | |
2112 | ||
2113 | return; | |
2114 | } | |
2115 | ||
2116 | /* Take the recovery path for errors or urgent background operations */ | |
2117 | if (mmc_blk_rq_error(&mqrq->brq) || | |
2118 | mmc_blk_urgent_bkops_needed(mq, mqrq)) { | |
2119 | spin_lock_irqsave(q->queue_lock, flags); | |
2120 | mq->recovery_needed = true; | |
2121 | mq->recovery_req = req; | |
2122 | spin_unlock_irqrestore(q->queue_lock, flags); | |
81196976 | 2123 | wake_up(&mq->wait); |
10f21df4 AH |
2124 | schedule_work(&mq->recovery_work); |
2125 | return; | |
2126 | } | |
2127 | ||
2128 | mmc_blk_rw_reset_success(mq, req); | |
2129 | ||
2130 | mq->rw_wait = false; | |
2131 | wake_up(&mq->wait); | |
2132 | ||
2133 | mmc_blk_mq_post_req(mq, req); | |
81196976 AH |
2134 | } |
2135 | ||
2136 | static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err) | |
2137 | { | |
2138 | struct request_queue *q = mq->queue; | |
2139 | unsigned long flags; | |
2140 | bool done; | |
2141 | ||
2142 | /* | |
10f21df4 AH |
2143 | * Wait while there is another request in progress, but not if recovery |
2144 | * is needed. Also indicate whether there is a request waiting to start. | |
81196976 AH |
2145 | */ |
2146 | spin_lock_irqsave(q->queue_lock, flags); | |
10f21df4 AH |
2147 | if (mq->recovery_needed) { |
2148 | *err = -EBUSY; | |
2149 | done = true; | |
2150 | } else { | |
2151 | done = !mq->rw_wait; | |
2152 | } | |
81196976 AH |
2153 | mq->waiting = !done; |
2154 | spin_unlock_irqrestore(q->queue_lock, flags); | |
2155 | ||
2156 | return done; | |
2157 | } | |
2158 | ||
2159 | static int mmc_blk_rw_wait(struct mmc_queue *mq, struct request **prev_req) | |
2160 | { | |
2161 | int err = 0; | |
2162 | ||
2163 | wait_event(mq->wait, mmc_blk_rw_wait_cond(mq, &err)); | |
2164 | ||
2165 | /* Always complete the previous request if there is one */ | |
2166 | mmc_blk_mq_complete_prev_req(mq, prev_req); | |
2167 | ||
2168 | return err; | |
2169 | } | |
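/*
 * Illustrative sketch of the rw_wait handshake (editor's note, not
 * driver code):
 *
 *	issuer					completion path
 *	------					---------------
 *	mq->rw_wait = true;
 *	mmc_start_request(host, mrq);
 *	wait_event(mq->wait, ...);		mq->complete_req = req;
 *						mq->rw_wait = false;
 *						wake_up(&mq->wait);
 *	mmc_blk_mq_complete_prev_req(mq, ...);
 */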
2170 | ||
2171 | static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq, | |
2172 | struct request *req) | |
2173 | { | |
2174 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
2175 | struct mmc_host *host = mq->card->host; | |
2176 | struct request *prev_req = NULL; | |
2177 | int err = 0; | |
2178 | ||
2179 | mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq); | |
2180 | ||
2181 | mqrq->brq.mrq.done = mmc_blk_mq_req_done; | |
2182 | ||
2183 | mmc_pre_req(host, &mqrq->brq.mrq); | |
2184 | ||
2185 | err = mmc_blk_rw_wait(mq, &prev_req); | |
2186 | if (err) | |
2187 | goto out_post_req; | |
2188 | ||
2189 | mq->rw_wait = true; | |
2190 | ||
2191 | err = mmc_start_request(host, &mqrq->brq.mrq); | |
2192 | ||
2193 | if (prev_req) | |
2194 | mmc_blk_mq_post_req(mq, prev_req); | |
2195 | ||
10f21df4 | 2196 | if (err) |
81196976 | 2197 | mq->rw_wait = false; |
10f21df4 AH |
2198 | |
2199 | /* Release re-tuning here where there is no synchronization required */ | |
2200 | if (err || mmc_host_done_complete(host)) | |
81196976 | 2201 | mmc_retune_release(host); |
81196976 AH |
2202 | |
2203 | out_post_req: | |
2204 | if (err) | |
2205 | mmc_post_req(host, &mqrq->brq.mrq, err); | |
2206 | ||
2207 | return err; | |
2208 | } | |
2209 | ||
2210 | static int mmc_blk_wait_for_idle(struct mmc_queue *mq, struct mmc_host *host) | |
2211 | { | |
1e8e55b6 AH |
2212 | if (mq->use_cqe) |
2213 | return host->cqe_ops->cqe_wait_for_idle(host); | |
2214 | ||
81196976 AH |
2215 | return mmc_blk_rw_wait(mq, NULL); |
2216 | } | |
2217 | ||
2218 | enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req) | |
2219 | { | |
2220 | struct mmc_blk_data *md = mq->blkdata; | |
2221 | struct mmc_card *card = md->queue.card; | |
2222 | struct mmc_host *host = card->host; | |
2223 | int ret; | |
2224 | ||
2225 | ret = mmc_blk_part_switch(card, md->part_type); | |
2226 | if (ret) | |
2227 | return MMC_REQ_FAILED_TO_START; | |
2228 | ||
2229 | switch (mmc_issue_type(mq, req)) { | |
2230 | case MMC_ISSUE_SYNC: | |
2231 | ret = mmc_blk_wait_for_idle(mq, host); | |
2232 | if (ret) | |
2233 | return MMC_REQ_BUSY; | |
2234 | switch (req_op(req)) { | |
2235 | case REQ_OP_DRV_IN: | |
2236 | case REQ_OP_DRV_OUT: | |
2237 | mmc_blk_issue_drv_op(mq, req); | |
2238 | break; | |
2239 | case REQ_OP_DISCARD: | |
2240 | mmc_blk_issue_discard_rq(mq, req); | |
2241 | break; | |
2242 | case REQ_OP_SECURE_ERASE: | |
2243 | mmc_blk_issue_secdiscard_rq(mq, req); | |
2244 | break; | |
2245 | case REQ_OP_FLUSH: | |
2246 | mmc_blk_issue_flush(mq, req); | |
2247 | break; | |
2248 | default: | |
2249 | WARN_ON_ONCE(1); | |
2250 | return MMC_REQ_FAILED_TO_START; | |
2251 | } | |
2252 | return MMC_REQ_FINISHED; | |
1e8e55b6 | 2253 | case MMC_ISSUE_DCMD: |
81196976 AH |
2254 | case MMC_ISSUE_ASYNC: |
2255 | switch (req_op(req)) { | |
1e8e55b6 AH |
2256 | case REQ_OP_FLUSH: |
2257 | ret = mmc_blk_cqe_issue_flush(mq, req); | |
2258 | break; | |
81196976 AH |
2259 | case REQ_OP_READ: |
2260 | case REQ_OP_WRITE: | |
1e8e55b6 AH |
2261 | if (mq->use_cqe) |
2262 | ret = mmc_blk_cqe_issue_rw_rq(mq, req); | |
2263 | else | |
2264 | ret = mmc_blk_mq_issue_rw_rq(mq, req); | |
81196976 AH |
2265 | break; |
2266 | default: | |
2267 | WARN_ON_ONCE(1); | |
2268 | ret = -EINVAL; | |
2269 | } | |
2270 | if (!ret) | |
2271 | return MMC_REQ_STARTED; | |
2272 | return ret == -EBUSY ? MMC_REQ_BUSY : MMC_REQ_FAILED_TO_START; | |
2273 | default: | |
2274 | WARN_ON_ONCE(1); | |
2275 | return MMC_REQ_FAILED_TO_START; | |
2276 | } | |
2277 | } | |
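/*
 * Dispatch summary for mmc_blk_mq_issue_rq() above (editor's note):
 *
 *	MMC_ISSUE_SYNC	-> wait for idle, then run the op inline
 *			   (drv op, discard, secure erase, flush)
 *	MMC_ISSUE_DCMD	-> CQE direct command, i.e. flush via CMD6
 *	MMC_ISSUE_ASYNC	-> read/write on the CQE or rw_wait path
 */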
2278 | ||
a6f6c96b RK |
2279 | static inline int mmc_blk_readonly(struct mmc_card *card) |
2280 | { | |
2281 | return mmc_card_readonly(card) || | |
2282 | !(card->csd.cmdclass & CCC_BLOCK_WRITE); | |
2283 | } | |
2284 | ||
371a689f AW |
2285 | static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, |
2286 | struct device *parent, | |
2287 | sector_t size, | |
2288 | bool default_ro, | |
add710ea JR |
2289 | const char *subname, |
2290 | int area_type) | |
1da177e4 LT |
2291 | { |
2292 | struct mmc_blk_data *md; | |
2293 | int devidx, ret; | |
2294 | ||
a04848c7 | 2295 | devidx = ida_simple_get(&mmc_blk_ida, 0, max_devices, GFP_KERNEL); |
e7b42769 SL |
2296 | if (devidx < 0) { |
2297 | /* | |
2298 | * We get -ENOSPC when there are no more available devidx |
2299 | * values. The reason may be that either userspace has not yet |
2300 | * unmounted the partitions, which postpones mmc_blk_release() |
2301 | * from being called, or the device has more partitions than |
2302 | * we support. |
2303 | */ | |
2304 | if (devidx == -ENOSPC) | |
2305 | dev_err(mmc_dev(card->host), | |
2306 | "no more device IDs available\n"); | |
2307 | ||
a04848c7 | 2308 | return ERR_PTR(devidx); |
e7b42769 | 2309 | } |
1da177e4 | 2310 | |
dd00cc48 | 2311 | md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL); |
a6f6c96b RK |
2312 | if (!md) { |
2313 | ret = -ENOMEM; | |
2314 | goto out; | |
2315 | } | |
1da177e4 | 2316 | |
add710ea JR |
2317 | md->area_type = area_type; |
2318 | ||
a6f6c96b RK |
2319 | /* |
2320 | * Set the read-only status based on the supported commands | |
2321 | * and the write protect switch. | |
2322 | */ | |
2323 | md->read_only = mmc_blk_readonly(card); | |
1da177e4 | 2324 | |
5e71b7a6 | 2325 | md->disk = alloc_disk(perdev_minors); |
a6f6c96b RK |
2326 | if (md->disk == NULL) { |
2327 | ret = -ENOMEM; | |
2328 | goto err_kfree; | |
2329 | } | |
1da177e4 | 2330 | |
a6f6c96b | 2331 | spin_lock_init(&md->lock); |
371a689f | 2332 | INIT_LIST_HEAD(&md->part); |
97548575 | 2333 | INIT_LIST_HEAD(&md->rpmbs); |
a6f6c96b | 2334 | md->usage = 1; |
1da177e4 | 2335 | |
d09408ad | 2336 | ret = mmc_init_queue(&md->queue, card, &md->lock, subname); |
a6f6c96b RK |
2337 | if (ret) |
2338 | goto err_putdisk; | |
1da177e4 | 2339 | |
7db3028e | 2340 | md->queue.blkdata = md; |
d2b18394 | 2341 | |
41e3efd0 AH |
2342 | /* |
2343 | * Keep an extra reference to the queue so that we can shut down the |
2344 | * queue (i.e. call blk_cleanup_queue()) while there are still | |
2345 | * references to the 'md'. The corresponding blk_put_queue() is in | |
2346 | * mmc_blk_put(). | |
2347 | */ | |
2348 | if (!blk_get_queue(md->queue.queue)) { | |
2349 | mmc_cleanup_queue(&md->queue); | |
2361bfb0 | 2350 | ret = -ENODEV; |
41e3efd0 AH |
2351 | goto err_putdisk; |
2352 | } | |
2353 | ||
fe6b4c88 | 2354 | md->disk->major = MMC_BLOCK_MAJOR; |
5e71b7a6 | 2355 | md->disk->first_minor = devidx * perdev_minors; |
a6f6c96b RK |
2356 | md->disk->fops = &mmc_bdops; |
2357 | md->disk->private_data = md; | |
2358 | md->disk->queue = md->queue.queue; | |
307d8e6f | 2359 | md->parent = parent; |
371a689f | 2360 | set_disk_ro(md->disk, md->read_only || default_ro); |
382c55f8 | 2361 | md->disk->flags = GENHD_FL_EXT_DEVT; |
f5b4d71f | 2362 | if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT)) |
53d8f974 | 2363 | md->disk->flags |= GENHD_FL_NO_PART_SCAN; |
a6f6c96b RK |
2364 | |
2365 | /* | |
2366 | * As discussed on lkml, GENHD_FL_REMOVABLE should: | |
2367 | * | |
2368 | * - be set for removable media with permanent block devices | |
2369 | * - be unset for removable block devices with permanent media | |
2370 | * | |
2371 | * Since MMC block devices clearly fall under the second | |
2372 | * case, we do not set GENHD_FL_REMOVABLE. Userspace | |
2373 | * should use the block device creation/destruction hotplug | |
2374 | * messages to tell when the card is present. | |
2375 | */ | |
2376 | ||
f06c9153 | 2377 | snprintf(md->disk->disk_name, sizeof(md->disk->disk_name), |
9aaf3437 | 2378 | "mmcblk%u%s", card->host->index, subname ? subname : ""); |
a6f6c96b | 2379 | |
a5075eb9 SD |
2380 | if (mmc_card_mmc(card)) |
2381 | blk_queue_logical_block_size(md->queue.queue, | |
2382 | card->ext_csd.data_sector_size); | |
2383 | else | |
2384 | blk_queue_logical_block_size(md->queue.queue, 512); | |
2385 | ||
371a689f | 2386 | set_capacity(md->disk, size); |
d0c97cfb | 2387 | |
f0d89972 | 2388 | if (mmc_host_cmd23(card->host)) { |
0ed50abb DG |
2389 | if ((mmc_card_mmc(card) && |
2390 | card->csd.mmca_vsn >= CSD_SPEC_VER_3) || | |
f0d89972 AW |
2391 | (mmc_card_sd(card) && |
2392 | card->scr.cmds & SD_SCR_CMD23_SUPPORT)) | |
2393 | md->flags |= MMC_BLK_CMD23; | |
2394 | } | |
d0c97cfb AW |
2395 | |
2396 | if (mmc_card_mmc(card) && | |
2397 | md->flags & MMC_BLK_CMD23 && | |
2398 | ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) || | |
2399 | card->ext_csd.rel_sectors)) { | |
2400 | md->flags |= MMC_BLK_REL_WR; | |
e9d5c746 | 2401 | blk_queue_write_cache(md->queue.queue, true, true); |
d0c97cfb AW |
2402 | } |
2403 | ||
371a689f AW |
2404 | return md; |
2405 | ||
2406 | err_putdisk: | |
2407 | put_disk(md->disk); | |
2408 | err_kfree: | |
2409 | kfree(md); | |
2410 | out: | |
a04848c7 | 2411 | ida_simple_remove(&mmc_blk_ida, devidx); |
371a689f AW |
2412 | return ERR_PTR(ret); |
2413 | } | |
2414 | ||
2415 | static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card) | |
2416 | { | |
2417 | sector_t size; | |
a6f6c96b | 2418 | |
85a18ad9 PO |
2419 | if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) { |
2420 | /* | |
2421 | * The EXT_CSD sector count is in number of 512 byte |
2422 | * sectors. | |
2423 | */ | |
371a689f | 2424 | size = card->ext_csd.sectors; |
85a18ad9 PO |
2425 | } else { |
2426 | /* | |
2427 | * The CSD capacity field is in units of read_blkbits. | |
2428 | * set_capacity takes units of 512 bytes. | |
2429 | */ | |
087de9ed KM |
2430 | size = (typeof(sector_t))card->csd.capacity |
2431 | << (card->csd.read_blkbits - 9); | |
85a18ad9 | 2432 | } |
371a689f | 2433 | |
7a30f2af | 2434 | return mmc_blk_alloc_req(card, &card->dev, size, false, NULL, |
add710ea | 2435 | MMC_BLK_DATA_AREA_MAIN); |
371a689f | 2436 | } |
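/*
 * Worked example of the capacity conversion above (editor's note): a
 * CSD reporting capacity == 0x3B38 with read_blkbits == 10 yields
 * 0x3B38 << (10 - 9) == 30320 sectors of 512 bytes, roughly 14.8 MiB.
 */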
a6f6c96b | 2437 | |
371a689f AW |
2438 | static int mmc_blk_alloc_part(struct mmc_card *card, |
2439 | struct mmc_blk_data *md, | |
2440 | unsigned int part_type, | |
2441 | sector_t size, | |
2442 | bool default_ro, | |
add710ea JR |
2443 | const char *subname, |
2444 | int area_type) | |
371a689f AW |
2445 | { |
2446 | char cap_str[10]; | |
2447 | struct mmc_blk_data *part_md; | |
2448 | ||
2449 | part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro, | |
add710ea | 2450 | subname, area_type); |
371a689f AW |
2451 | if (IS_ERR(part_md)) |
2452 | return PTR_ERR(part_md); | |
2453 | part_md->part_type = part_type; | |
2454 | list_add(&part_md->part, &md->part); | |
2455 | ||
b9f28d86 | 2456 | string_get_size((u64)get_capacity(part_md->disk), 512, STRING_UNITS_2, |
371a689f | 2457 | cap_str, sizeof(cap_str)); |
a3c76eb9 | 2458 | pr_info("%s: %s %s partition %u %s\n", |
371a689f AW |
2459 | part_md->disk->disk_name, mmc_card_id(card), |
2460 | mmc_card_name(card), part_md->part_type, cap_str); | |
2461 | return 0; | |
2462 | } | |
2463 | ||
97548575 LW |
2464 | /** |
2465 | * mmc_rpmb_ioctl() - ioctl handler for the RPMB chardev | |
2466 | * @filp: the character device file | |
2467 | * @cmd: the ioctl() command | |
2468 | * @arg: the argument from userspace | |
2469 | * | |
2470 | * This will essentially just redirect the ioctl()s coming in over to | |
2471 | * the main block device spawning the RPMB character device. | |
2472 | */ | |
2473 | static long mmc_rpmb_ioctl(struct file *filp, unsigned int cmd, | |
2474 | unsigned long arg) | |
2475 | { | |
2476 | struct mmc_rpmb_data *rpmb = filp->private_data; | |
2477 | int ret; | |
2478 | ||
2479 | switch (cmd) { | |
2480 | case MMC_IOC_CMD: | |
2481 | ret = mmc_blk_ioctl_cmd(rpmb->md, | |
2482 | (struct mmc_ioc_cmd __user *)arg, | |
2483 | rpmb); | |
2484 | break; | |
2485 | case MMC_IOC_MULTI_CMD: | |
2486 | ret = mmc_blk_ioctl_multi_cmd(rpmb->md, | |
2487 | (struct mmc_ioc_multi_cmd __user *)arg, | |
2488 | rpmb); | |
2489 | break; | |
2490 | default: | |
2491 | ret = -EINVAL; | |
2492 | break; | |
2493 | } | |
2494 | ||
2495 | return ret; | |
2496 | } | |
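/*
 * Hypothetical userspace usage of this chardev (editor's note; the
 * device path and field setup below are assumptions, not driver
 * code):
 *
 *	struct mmc_ioc_cmd idata = {};
 *	int fd = open("/dev/mmcblk0rpmb", O_RDWR);
 *
 *	idata.opcode = MMC_READ_MULTIPLE_BLOCK;	// CMD18
 *	// ... fill in arg, flags, blksz, blocks, data pointer ...
 *	if (ioctl(fd, MMC_IOC_CMD, &idata) < 0)
 *		perror("MMC_IOC_CMD");
 */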
2497 | ||
2498 | #ifdef CONFIG_COMPAT | |
2499 | static long mmc_rpmb_ioctl_compat(struct file *filp, unsigned int cmd, | |
2500 | unsigned long arg) | |
2501 | { | |
2502 | return mmc_rpmb_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); | |
2503 | } | |
2504 | #endif | |
2505 | ||
2506 | static int mmc_rpmb_chrdev_open(struct inode *inode, struct file *filp) | |
2507 | { | |
2508 | struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev, | |
2509 | struct mmc_rpmb_data, chrdev); | |
2510 | ||
2511 | get_device(&rpmb->dev); | |
2512 | filp->private_data = rpmb; | |
1c87f735 | 2513 | mmc_blk_get(rpmb->md->disk); |
97548575 LW |
2514 | |
2515 | return nonseekable_open(inode, filp); | |
2516 | } | |
2517 | ||
2518 | static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp) | |
2519 | { | |
2520 | struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev, | |
2521 | struct mmc_rpmb_data, chrdev); | |
2522 | ||
2523 | put_device(&rpmb->dev); | |
1c87f735 | 2524 | mmc_blk_put(rpmb->md); |
97548575 LW |
2525 | |
2526 | return 0; | |
2527 | } | |
2528 | ||
2529 | static const struct file_operations mmc_rpmb_fileops = { | |
2530 | .release = mmc_rpmb_chrdev_release, | |
2531 | .open = mmc_rpmb_chrdev_open, | |
2532 | .owner = THIS_MODULE, | |
2533 | .llseek = no_llseek, | |
2534 | .unlocked_ioctl = mmc_rpmb_ioctl, | |
2535 | #ifdef CONFIG_COMPAT | |
2536 | .compat_ioctl = mmc_rpmb_ioctl_compat, | |
2537 | #endif | |
2538 | }; | |
2539 | ||
1c87f735 LW |
2540 | static void mmc_blk_rpmb_device_release(struct device *dev) |
2541 | { | |
2542 | struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev); | |
2543 | ||
2544 | ida_simple_remove(&mmc_rpmb_ida, rpmb->id); | |
2545 | kfree(rpmb); | |
2546 | } | |
97548575 LW |
2547 | |
2548 | static int mmc_blk_alloc_rpmb_part(struct mmc_card *card, | |
2549 | struct mmc_blk_data *md, | |
2550 | unsigned int part_index, | |
2551 | sector_t size, | |
2552 | const char *subname) | |
2553 | { | |
2554 | int devidx, ret; | |
2555 | char rpmb_name[DISK_NAME_LEN]; | |
2556 | char cap_str[10]; | |
2557 | struct mmc_rpmb_data *rpmb; | |
2558 | ||
2559 | /* This creates the minor number for the RPMB char device */ | |
2560 | devidx = ida_simple_get(&mmc_rpmb_ida, 0, max_devices, GFP_KERNEL); | |
2561 | if (devidx < 0) | |
2562 | return devidx; | |
2563 | ||
2564 | rpmb = kzalloc(sizeof(*rpmb), GFP_KERNEL); | |
1c87f735 LW |
2565 | if (!rpmb) { |
2566 | ida_simple_remove(&mmc_rpmb_ida, devidx); | |
97548575 | 2567 | return -ENOMEM; |
1c87f735 | 2568 | } |
97548575 LW |
2569 | |
2570 | snprintf(rpmb_name, sizeof(rpmb_name), | |
2571 | "mmcblk%u%s", card->host->index, subname ? subname : ""); | |
2572 | ||
2573 | rpmb->id = devidx; | |
2574 | rpmb->part_index = part_index; | |
2575 | rpmb->dev.init_name = rpmb_name; | |
2576 | rpmb->dev.bus = &mmc_rpmb_bus_type; | |
2577 | rpmb->dev.devt = MKDEV(MAJOR(mmc_rpmb_devt), rpmb->id); | |
2578 | rpmb->dev.parent = &card->dev; | |
1c87f735 | 2579 | rpmb->dev.release = mmc_blk_rpmb_device_release; |
97548575 LW |
2580 | device_initialize(&rpmb->dev); |
2581 | dev_set_drvdata(&rpmb->dev, rpmb); | |
2582 | rpmb->md = md; | |
2583 | ||
2584 | cdev_init(&rpmb->chrdev, &mmc_rpmb_fileops); | |
2585 | rpmb->chrdev.owner = THIS_MODULE; | |
2586 | ret = cdev_device_add(&rpmb->chrdev, &rpmb->dev); | |
2587 | if (ret) { | |
2588 | pr_err("%s: could not add character device\n", rpmb_name); | |
1c87f735 | 2589 | goto out_put_device; |
97548575 LW |
2590 | } |
2591 | ||
2592 | list_add(&rpmb->node, &md->rpmbs); | |
2593 | ||
2594 | string_get_size((u64)size, 512, STRING_UNITS_2, | |
2595 | cap_str, sizeof(cap_str)); | |
2596 | ||
2597 | pr_info("%s: %s %s partition %u %s, chardev (%d:%d)\n", | |
2598 | rpmb_name, mmc_card_id(card), | |
2599 | mmc_card_name(card), EXT_CSD_PART_CONFIG_ACC_RPMB, cap_str, | |
2600 | MAJOR(mmc_rpmb_devt), rpmb->id); | |
2601 | ||
2602 | return 0; | |
2603 | ||
1c87f735 LW |
2604 | out_put_device: |
2605 | put_device(&rpmb->dev); | |
97548575 LW |
2606 | return ret; |
2607 | } | |
2608 | ||
2609 | static void mmc_blk_remove_rpmb_part(struct mmc_rpmb_data *rpmb) | |
97548575 LW |
2611 | { |
2612 | cdev_device_del(&rpmb->chrdev, &rpmb->dev); | |
1c87f735 | 2613 | put_device(&rpmb->dev); |
97548575 LW |
2614 | } |
2615 | ||
e0c368d5 NJ |
2616 | /* MMC Physical partitions consist of two boot partitions and |
2617 | * up to four general purpose partitions. | |
2618 | * For each partition enabled in EXT_CSD a block device will be allocated |
2619 | * to provide access to the partition. | |
2620 | */ | |
2621 | ||
371a689f AW |
2622 | static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md) |
2623 | { | |
97548575 | 2624 | int idx, ret; |
371a689f AW |
2625 | |
2626 | if (!mmc_card_mmc(card)) | |
2627 | return 0; | |
2628 | ||
e0c368d5 | 2629 | for (idx = 0; idx < card->nr_parts; idx++) { |
97548575 LW |
2630 | if (card->part[idx].area_type & MMC_BLK_DATA_AREA_RPMB) { |
2631 | /* | |
2632 | * RPMB partitions do not provide block access; they |
2633 | * are only accessed using ioctl()s. Thus create |
2634 | * special RPMB block devices that do not have a |
2635 | * backing block queue. |
2636 | */ | |
2637 | ret = mmc_blk_alloc_rpmb_part(card, md, | |
2638 | card->part[idx].part_cfg, | |
2639 | card->part[idx].size >> 9, | |
2640 | card->part[idx].name); | |
2641 | if (ret) | |
2642 | return ret; | |
2643 | } else if (card->part[idx].size) { | |
e0c368d5 NJ |
2644 | ret = mmc_blk_alloc_part(card, md, |
2645 | card->part[idx].part_cfg, | |
2646 | card->part[idx].size >> 9, | |
2647 | card->part[idx].force_ro, | |
add710ea JR |
2648 | card->part[idx].name, |
2649 | card->part[idx].area_type); | |
e0c368d5 NJ |
2650 | if (ret) |
2651 | return ret; | |
2652 | } | |
371a689f AW |
2653 | } |
2654 | ||
97548575 | 2655 | return 0; |
1da177e4 LT |
2656 | } |
2657 | ||
371a689f AW |
2658 | static void mmc_blk_remove_req(struct mmc_blk_data *md) |
2659 | { | |
add710ea JR |
2660 | struct mmc_card *card; |
2661 | ||
371a689f | 2662 | if (md) { |
fdfa20c1 PT |
2663 | /* |
2664 | * Flush remaining requests and free queues. It | |
2665 | * is freeing the queue that stops new requests | |
2666 | * from being accepted. | |
2667 | */ | |
8efb83a2 | 2668 | card = md->queue.card; |
fdfa20c1 | 2669 | mmc_cleanup_queue(&md->queue); |
371a689f AW |
2670 | if (md->disk->flags & GENHD_FL_UP) { |
2671 | device_remove_file(disk_to_dev(md->disk), &md->force_ro); | |
add710ea JR |
2672 | if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && |
2673 | card->ext_csd.boot_ro_lockable) | |
2674 | device_remove_file(disk_to_dev(md->disk), | |
2675 | &md->power_ro_lock); | |
371a689f | 2676 | |
371a689f AW |
2677 | del_gendisk(md->disk); |
2678 | } | |
371a689f AW |
2679 | mmc_blk_put(md); |
2680 | } | |
2681 | } | |
2682 | ||
2683 | static void mmc_blk_remove_parts(struct mmc_card *card, | |
2684 | struct mmc_blk_data *md) | |
2685 | { | |
2686 | struct list_head *pos, *q; | |
2687 | struct mmc_blk_data *part_md; | |
97548575 | 2688 | struct mmc_rpmb_data *rpmb; |
371a689f | 2689 | |
97548575 LW |
2690 | /* Remove RPMB partitions */ |
2691 | list_for_each_safe(pos, q, &md->rpmbs) { | |
2692 | rpmb = list_entry(pos, struct mmc_rpmb_data, node); | |
2693 | list_del(pos); | |
2694 | mmc_blk_remove_rpmb_part(rpmb); | |
2695 | } | |
2696 | /* Remove block partitions */ | |
371a689f AW |
2697 | list_for_each_safe(pos, q, &md->part) { |
2698 | part_md = list_entry(pos, struct mmc_blk_data, part); | |
2699 | list_del(pos); | |
2700 | mmc_blk_remove_req(part_md); | |
2701 | } | |
2702 | } | |
2703 | ||
2704 | static int mmc_add_disk(struct mmc_blk_data *md) | |
2705 | { | |
2706 | int ret; | |
add710ea | 2707 | struct mmc_card *card = md->queue.card; |
371a689f | 2708 | |
307d8e6f | 2709 | device_add_disk(md->parent, md->disk); |
371a689f AW |
2710 | md->force_ro.show = force_ro_show; |
2711 | md->force_ro.store = force_ro_store; | |
641c3187 | 2712 | sysfs_attr_init(&md->force_ro.attr); |
371a689f AW |
2713 | md->force_ro.attr.name = "force_ro"; |
2714 | md->force_ro.attr.mode = S_IRUGO | S_IWUSR; | |
2715 | ret = device_create_file(disk_to_dev(md->disk), &md->force_ro); | |
2716 | if (ret) | |
add710ea JR |
2717 | goto force_ro_fail; |
2718 | ||
2719 | if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && | |
2720 | card->ext_csd.boot_ro_lockable) { | |
88187398 | 2721 | umode_t mode; |
add710ea JR |
2722 | |
2723 | if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS) | |
2724 | mode = S_IRUGO; | |
2725 | else | |
2726 | mode = S_IRUGO | S_IWUSR; | |
2727 | ||
2728 | md->power_ro_lock.show = power_ro_lock_show; | |
2729 | md->power_ro_lock.store = power_ro_lock_store; | |
00d9ac08 | 2730 | sysfs_attr_init(&md->power_ro_lock.attr); |
add710ea JR |
2731 | md->power_ro_lock.attr.mode = mode; |
2732 | md->power_ro_lock.attr.name = | |
2733 | "ro_lock_until_next_power_on"; | |
2734 | ret = device_create_file(disk_to_dev(md->disk), | |
2735 | &md->power_ro_lock); | |
2736 | if (ret) | |
2737 | goto power_ro_lock_fail; | |
2738 | } | |
2739 | return ret; | |
2740 | ||
2741 | power_ro_lock_fail: | |
2742 | device_remove_file(disk_to_dev(md->disk), &md->force_ro); | |
2743 | force_ro_fail: | |
2744 | del_gendisk(md->disk); | |
371a689f AW |
2745 | |
2746 | return ret; | |
2747 | } | |
2748 | ||
627c3ccf LW |
2749 | #ifdef CONFIG_DEBUG_FS |
2750 | ||
2751 | static int mmc_dbg_card_status_get(void *data, u64 *val) | |
2752 | { | |
2753 | struct mmc_card *card = data; | |
2754 | struct mmc_blk_data *md = dev_get_drvdata(&card->dev); | |
2755 | struct mmc_queue *mq = &md->queue; | |
2756 | struct request *req; | |
2757 | int ret; | |
2758 | ||
2759 | /* Ask the block layer about the card status */ | |
2760 | req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM); | |
fb8e456e AH |
2761 | if (IS_ERR(req)) |
2762 | return PTR_ERR(req); | |
627c3ccf LW |
2763 | req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS; |
2764 | blk_execute_rq(mq->queue, NULL, req, 0); | |
2765 | ret = req_to_mmc_queue_req(req)->drv_op_result; | |
2766 | if (ret >= 0) { | |
2767 | *val = ret; | |
2768 | ret = 0; | |
2769 | } | |
34c089e8 | 2770 | blk_put_request(req); |
627c3ccf LW |
2771 | |
2772 | return ret; | |
2773 | } | |
2774 | DEFINE_SIMPLE_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get, | |
2775 | NULL, "%08llx\n"); | |
2776 | ||
2777 | /* That is two hex digits per byte * 512 bytes + 1 for a newline */ |
2778 | #define EXT_CSD_STR_LEN 1025 | |
2779 | ||
2780 | static int mmc_ext_csd_open(struct inode *inode, struct file *filp) | |
2781 | { | |
2782 | struct mmc_card *card = inode->i_private; | |
2783 | struct mmc_blk_data *md = dev_get_drvdata(&card->dev); | |
2784 | struct mmc_queue *mq = &md->queue; | |
2785 | struct request *req; | |
2786 | char *buf; | |
2787 | ssize_t n = 0; | |
2788 | u8 *ext_csd; | |
2789 | int err, i; | |
2790 | ||
2791 | buf = kmalloc(EXT_CSD_STR_LEN + 1, GFP_KERNEL); | |
2792 | if (!buf) | |
2793 | return -ENOMEM; | |
2794 | ||
2795 | /* Ask the block layer for the EXT CSD */ | |
2796 | req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM); | |
fb8e456e AH |
2797 | if (IS_ERR(req)) { |
2798 | err = PTR_ERR(req); | |
2799 | goto out_free; | |
2800 | } | |
627c3ccf LW |
2801 | req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD; |
2802 | req_to_mmc_queue_req(req)->drv_op_data = &ext_csd; | |
2803 | blk_execute_rq(mq->queue, NULL, req, 0); | |
2804 | err = req_to_mmc_queue_req(req)->drv_op_result; | |
34c089e8 | 2805 | blk_put_request(req); |
627c3ccf LW |
2806 | if (err) { |
2807 | pr_err("FAILED %d\n", err); | |
2808 | goto out_free; | |
2809 | } | |
2810 | ||
2811 | for (i = 0; i < 512; i++) | |
2812 | n += sprintf(buf + n, "%02x", ext_csd[i]); | |
2813 | n += sprintf(buf + n, "\n"); | |
2814 | ||
2815 | if (n != EXT_CSD_STR_LEN) { | |
2816 | err = -EINVAL; | |
0be55579 | 2817 | kfree(ext_csd); |
627c3ccf LW |
2818 | goto out_free; |
2819 | } | |
2820 | ||
2821 | filp->private_data = buf; | |
2822 | kfree(ext_csd); | |
2823 | return 0; | |
2824 | ||
2825 | out_free: | |
2826 | kfree(buf); | |
2827 | return err; | |
2828 | } | |
2829 | ||
2830 | static ssize_t mmc_ext_csd_read(struct file *filp, char __user *ubuf, | |
2831 | size_t cnt, loff_t *ppos) | |
2832 | { | |
2833 | char *buf = filp->private_data; | |
2834 | ||
2835 | return simple_read_from_buffer(ubuf, cnt, ppos, | |
2836 | buf, EXT_CSD_STR_LEN); | |
2837 | } | |
2838 | ||
2839 | static int mmc_ext_csd_release(struct inode *inode, struct file *file) | |
2840 | { | |
2841 | kfree(file->private_data); | |
2842 | return 0; | |
2843 | } | |
2844 | ||
2845 | static const struct file_operations mmc_dbg_ext_csd_fops = { | |
2846 | .open = mmc_ext_csd_open, | |
2847 | .read = mmc_ext_csd_read, | |
2848 | .release = mmc_ext_csd_release, | |
2849 | .llseek = default_llseek, | |
2850 | }; | |
2851 | ||
f9f0da98 | 2852 | static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md) |
627c3ccf LW |
2853 | { |
2854 | struct dentry *root; | |
2855 | ||
2856 | if (!card->debugfs_root) | |
2857 | return 0; | |
2858 | ||
2859 | root = card->debugfs_root; | |
2860 | ||
2861 | if (mmc_card_mmc(card) || mmc_card_sd(card)) { | |
f9f0da98 AH |
2862 | md->status_dentry = |
2863 | debugfs_create_file("status", S_IRUSR, root, card, | |
2864 | &mmc_dbg_card_status_fops); | |
2865 | if (!md->status_dentry) | |
627c3ccf LW |
2866 | return -EIO; |
2867 | } | |
2868 | ||
2869 | if (mmc_card_mmc(card)) { | |
f9f0da98 AH |
2870 | md->ext_csd_dentry = |
2871 | debugfs_create_file("ext_csd", S_IRUSR, root, card, | |
2872 | &mmc_dbg_ext_csd_fops); | |
2873 | if (!md->ext_csd_dentry) | |
627c3ccf LW |
2874 | return -EIO; |
2875 | } | |
2876 | ||
2877 | return 0; | |
2878 | } | |
2879 | ||
f9f0da98 AH |
2880 | static void mmc_blk_remove_debugfs(struct mmc_card *card, |
2881 | struct mmc_blk_data *md) | |
2882 | { | |
2883 | if (!card->debugfs_root) | |
2884 | return; | |
2885 | ||
2886 | if (!IS_ERR_OR_NULL(md->status_dentry)) { | |
2887 | debugfs_remove(md->status_dentry); | |
2888 | md->status_dentry = NULL; | |
2889 | } | |
2890 | ||
2891 | if (!IS_ERR_OR_NULL(md->ext_csd_dentry)) { | |
2892 | debugfs_remove(md->ext_csd_dentry); | |
2893 | md->ext_csd_dentry = NULL; | |
2894 | } | |
2895 | } | |
627c3ccf LW |
2896 | |
2897 | #else | |
2898 | ||
f9f0da98 | 2899 | static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md) |
627c3ccf LW |
2900 | { |
2901 | return 0; | |
2902 | } | |
2903 | ||
f9f0da98 AH |
2904 | static void mmc_blk_remove_debugfs(struct mmc_card *card, |
2905 | struct mmc_blk_data *md) | |
2906 | { | |
2907 | } | |
2908 | ||
627c3ccf LW |
2909 | #endif /* CONFIG_DEBUG_FS */ |
2910 | ||
96541bac | 2911 | static int mmc_blk_probe(struct mmc_card *card) |
1da177e4 | 2912 | { |
371a689f | 2913 | struct mmc_blk_data *md, *part_md; |
a7bbb573 PO |
2914 | char cap_str[10]; |
2915 | ||
912490db PO |
2916 | /* |
2917 | * Check that the card supports the command class(es) we need. | |
2918 | */ | |
2919 | if (!(card->csd.cmdclass & CCC_BLOCK_READ)) | |
1da177e4 LT |
2920 | return -ENODEV; |
2921 | ||
8c7cdbf9 | 2922 | mmc_fixup_device(card, mmc_blk_fixups); |
5204d00f | 2923 | |
1da177e4 | 2924 | md = mmc_blk_alloc(card); |
304419d8 | 2925 | if (IS_ERR(md)) |
1da177e4 LT |
2926 | return PTR_ERR(md); |
2927 | ||
b9f28d86 | 2928 | string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2, |
a7bbb573 | 2929 | cap_str, sizeof(cap_str)); |
a3c76eb9 | 2930 | pr_info("%s: %s %s %s %s\n", |
1da177e4 | 2931 | md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), |
a7bbb573 | 2932 | cap_str, md->read_only ? "(ro)" : ""); |
1da177e4 | 2933 | |
371a689f AW |
2934 | if (mmc_blk_alloc_parts(card, md)) |
2935 | goto out; | |
2936 | ||
96541bac | 2937 | dev_set_drvdata(&card->dev, md); |
6f60c222 | 2938 | |
371a689f AW |
2939 | if (mmc_add_disk(md)) |
2940 | goto out; | |
2941 | ||
2942 | list_for_each_entry(part_md, &md->part, part) { | |
2943 | if (mmc_add_disk(part_md)) | |
2944 | goto out; | |
2945 | } | |
e94cfef6 | 2946 | |
627c3ccf | 2947 | /* Add two debugfs entries */ |
f9f0da98 | 2948 | mmc_blk_add_debugfs(card, md); |
627c3ccf | 2949 | |
e94cfef6 UH |
2950 | pm_runtime_set_autosuspend_delay(&card->dev, 3000); |
2951 | pm_runtime_use_autosuspend(&card->dev); | |
2952 | ||
2953 | /* | |
2954 | * Don't enable runtime PM for SD-combo cards here. Leave that | |
2955 | * decision to be taken during the SDIO init sequence instead. | |
2956 | */ | |
2957 | if (card->type != MMC_TYPE_SD_COMBO) { | |
2958 | pm_runtime_set_active(&card->dev); | |
2959 | pm_runtime_enable(&card->dev); | |
2960 | } | |
2961 | ||
1da177e4 LT |
2962 | return 0; |
2963 | ||
2964 | out: | |
371a689f AW |
2965 | mmc_blk_remove_parts(card, md); |
2966 | mmc_blk_remove_req(md); | |
5865f287 | 2967 | return 0; |
1da177e4 LT |
2968 | } |
2969 | ||
96541bac | 2970 | static void mmc_blk_remove(struct mmc_card *card) |
1da177e4 | 2971 | { |
96541bac | 2972 | struct mmc_blk_data *md = dev_get_drvdata(&card->dev); |
1da177e4 | 2973 | |
f9f0da98 | 2974 | mmc_blk_remove_debugfs(card, md); |
371a689f | 2975 | mmc_blk_remove_parts(card, md); |
e94cfef6 | 2976 | pm_runtime_get_sync(&card->dev); |
ddd6fa7e | 2977 | mmc_claim_host(card->host); |
1f797edc | 2978 | mmc_blk_part_switch(card, md->part_type); |
ddd6fa7e | 2979 | mmc_release_host(card->host); |
e94cfef6 UH |
2980 | if (card->type != MMC_TYPE_SD_COMBO) |
2981 | pm_runtime_disable(&card->dev); | |
2982 | pm_runtime_put_noidle(&card->dev); | |
371a689f | 2983 | mmc_blk_remove_req(md); |
96541bac | 2984 | dev_set_drvdata(&card->dev, NULL); |
1da177e4 LT |
2985 | } |
2986 | ||
96541bac | 2987 | static int _mmc_blk_suspend(struct mmc_card *card) |
1da177e4 | 2988 | { |
371a689f | 2989 | struct mmc_blk_data *part_md; |
96541bac | 2990 | struct mmc_blk_data *md = dev_get_drvdata(&card->dev); |
1da177e4 LT |
2991 | |
2992 | if (md) { | |
2993 | mmc_queue_suspend(&md->queue); | |
371a689f AW |
2994 | list_for_each_entry(part_md, &md->part, part) { |
2995 | mmc_queue_suspend(&part_md->queue); | |
2996 | } | |
1da177e4 LT |
2997 | } |
2998 | return 0; | |
2999 | } | |
3000 | ||
96541bac | 3001 | static void mmc_blk_shutdown(struct mmc_card *card) |
76287748 | 3002 | { |
96541bac | 3003 | _mmc_blk_suspend(card); |
76287748 UH |
3004 | } |
3005 | ||
0967edc6 UH |
3006 | #ifdef CONFIG_PM_SLEEP |
3007 | static int mmc_blk_suspend(struct device *dev) | |
76287748 | 3008 | { |
96541bac UH |
3009 | struct mmc_card *card = mmc_dev_to_card(dev); |
3010 | ||
3011 | return _mmc_blk_suspend(card); | |
76287748 UH |
3012 | } |
3013 | ||
0967edc6 | 3014 | static int mmc_blk_resume(struct device *dev) |
1da177e4 | 3015 | { |
371a689f | 3016 | struct mmc_blk_data *part_md; |
fc95e30b | 3017 | struct mmc_blk_data *md = dev_get_drvdata(dev); |
1da177e4 LT |
3018 | |
3019 | if (md) { | |
371a689f AW |
3020 | /* |
3021 | * Resume involves the card going into idle state, | |
3022 | * so current partition is always the main one. | |
3023 | */ | |
3024 | md->part_curr = md->part_type; | |
1da177e4 | 3025 | mmc_queue_resume(&md->queue); |
371a689f AW |
3026 | list_for_each_entry(part_md, &md->part, part) { |
3027 | mmc_queue_resume(&part_md->queue); | |
3028 | } | |
1da177e4 LT |
3029 | } |
3030 | return 0; | |
3031 | } | |
1da177e4 LT |
3032 | #endif |
3033 | ||
0967edc6 UH |
3034 | static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume); |
3035 | ||
96541bac UH |
3036 | static struct mmc_driver mmc_driver = { |
3037 | .drv = { | |
3038 | .name = "mmcblk", | |
3039 | .pm = &mmc_blk_pm_ops, | |
3040 | }, | |
1da177e4 LT |
3041 | .probe = mmc_blk_probe, |
3042 | .remove = mmc_blk_remove, | |
76287748 | 3043 | .shutdown = mmc_blk_shutdown, |
1da177e4 LT |
3044 | }; |
3045 | ||
3046 | static int __init mmc_blk_init(void) | |
3047 | { | |
9d4e98e9 | 3048 | int res; |
1da177e4 | 3049 | |
97548575 LW |
3050 | res = bus_register(&mmc_rpmb_bus_type); |
3051 | if (res < 0) { | |
3052 | pr_err("mmcblk: could not register RPMB bus type\n"); | |
3053 | return res; | |
3054 | } | |
3055 | res = alloc_chrdev_region(&mmc_rpmb_devt, 0, MAX_DEVICES, "rpmb"); | |
3056 | if (res < 0) { | |
3057 | pr_err("mmcblk: failed to allocate rpmb chrdev region\n"); | |
3058 | goto out_bus_unreg; | |
3059 | } | |
3060 | ||
5e71b7a6 OJ |
3061 | if (perdev_minors != CONFIG_MMC_BLOCK_MINORS) |
3062 | pr_info("mmcblk: using %d minors per device\n", perdev_minors); | |
3063 | ||
a26eba61 | 3064 | max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors); |
5e71b7a6 | 3065 | |
fe6b4c88 PO |
3066 | res = register_blkdev(MMC_BLOCK_MAJOR, "mmc"); |
3067 | if (res) | |
97548575 | 3068 | goto out_chrdev_unreg; |
1da177e4 | 3069 | |
9d4e98e9 AM |
3070 | res = mmc_register_driver(&mmc_driver); |
3071 | if (res) | |
97548575 | 3072 | goto out_blkdev_unreg; |
1da177e4 | 3073 | |
9d4e98e9 | 3074 | return 0; |
97548575 LW |
3075 | |
3076 | out_blkdev_unreg: | |
9d4e98e9 | 3077 | unregister_blkdev(MMC_BLOCK_MAJOR, "mmc"); |
97548575 LW |
3078 | out_chrdev_unreg: |
3079 | unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES); | |
3080 | out_bus_unreg: | |
3081 | bus_unregister(&mmc_rpmb_bus_type); | |
1da177e4 LT |
3082 | return res; |
3083 | } | |
3084 | ||
3085 | static void __exit mmc_blk_exit(void) | |
3086 | { | |
3087 | mmc_unregister_driver(&mmc_driver); | |
fe6b4c88 | 3088 | unregister_blkdev(MMC_BLOCK_MAJOR, "mmc"); |
97548575 | 3089 | unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES); |
1da177e4 LT |
3090 | } |
3091 | ||
3092 | module_init(mmc_blk_init); | |
3093 | module_exit(mmc_blk_exit); | |
3094 | ||
3095 | MODULE_LICENSE("GPL"); | |
3096 | MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver"); | |
3097 |