Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Block driver for media (i.e., flash cards) | |
3 | * | |
4 | * Copyright 2002 Hewlett-Packard Company | |
979ce720 | 5 | * Copyright 2005-2008 Pierre Ossman |
1da177e4 LT |
6 | * |
7 | * Use consistent with the GNU GPL is permitted, | |
8 | * provided that this copyright notice is | |
9 | * preserved in its entirety in all copies and derived works. | |
10 | * | |
11 | * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED, | |
12 | * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS | |
13 | * FITNESS FOR ANY PARTICULAR PURPOSE. | |
14 | * | |
15 | * Many thanks to Alessandro Rubini and Jonathan Corbet! | |
16 | * | |
17 | * Author: Andrew Christian | |
18 | * 28 May 2002 | |
19 | */ | |
20 | #include <linux/moduleparam.h> | |
21 | #include <linux/module.h> | |
22 | #include <linux/init.h> | |
23 | ||
1da177e4 LT |
24 | #include <linux/kernel.h> |
25 | #include <linux/fs.h> | |
5a0e3ad6 | 26 | #include <linux/slab.h> |
1da177e4 LT |
27 | #include <linux/errno.h> |
28 | #include <linux/hdreg.h> | |
29 | #include <linux/kdev_t.h> | |
30 | #include <linux/blkdev.h> | |
a621aaed | 31 | #include <linux/mutex.h> |
ec5a19dd | 32 | #include <linux/scatterlist.h> |
a7bbb573 | 33 | #include <linux/string_helpers.h> |
cb87ea28 JC |
34 | #include <linux/delay.h> |
35 | #include <linux/capability.h> | |
36 | #include <linux/compat.h> | |
1da177e4 | 37 | |
cb87ea28 | 38 | #include <linux/mmc/ioctl.h> |
1da177e4 | 39 | #include <linux/mmc/card.h> |
385e3227 | 40 | #include <linux/mmc/host.h> |
da7fbe58 PO |
41 | #include <linux/mmc/mmc.h> |
42 | #include <linux/mmc/sd.h> | |
1da177e4 LT |
43 | |
44 | #include <asm/system.h> | |
45 | #include <asm/uaccess.h> | |
46 | ||
98ac2162 | 47 | #include "queue.h" |
1da177e4 | 48 | |
6b0b6285 | 49 | MODULE_ALIAS("mmc:block"); |
5e71b7a6 OJ |
50 | #ifdef MODULE_PARAM_PREFIX |
51 | #undef MODULE_PARAM_PREFIX | |
52 | #endif | |
53 | #define MODULE_PARAM_PREFIX "mmcblk." | |
54 | ||
6a7a6b45 AW |
55 | #define INAND_CMD38_ARG_EXT_CSD 113 |
56 | #define INAND_CMD38_ARG_ERASE 0x00 | |
57 | #define INAND_CMD38_ARG_TRIM 0x01 | |
58 | #define INAND_CMD38_ARG_SECERASE 0x80 | |
59 | #define INAND_CMD38_ARG_SECTRIM1 0x81 | |
60 | #define INAND_CMD38_ARG_SECTRIM2 0x88 | |
61 | ||
5e71b7a6 | 62 | static DEFINE_MUTEX(block_mutex); |
6b0b6285 | 63 | |
1da177e4 | 64 | /* |
5e71b7a6 OJ |
65 | * The defaults come from config options but can be overridden by module |
66 | * or bootarg options. | |
1da177e4 | 67 | */ |
5e71b7a6 | 68 | static int perdev_minors = CONFIG_MMC_BLOCK_MINORS; |
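/*
 * Illustrative example (editor's note, not part of the original source):
 * because of the "mmcblk." MODULE_PARAM_PREFIX defined above, the number
 * of minors per device can be overridden from the kernel command line,
 * e.g.:
 *
 *	mmcblk.perdev_minors=16
 */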
1dff3144 | 69 | |
5e71b7a6 OJ |
70 | /* |
71 | * We've only got one major, so number of mmcblk devices is | |
72 | * limited to 256 / number of minors per device. | |
73 | */ | |
74 | static int max_devices; | |
75 | ||
76 | /* 256 minors, so at most 256 separate devices */ | |
77 | static DECLARE_BITMAP(dev_use, 256); | |
f06c9153 | 78 | static DECLARE_BITMAP(name_use, 256); |
1da177e4 | 79 | |
1da177e4 LT |
80 | /* |
81 | * There is one mmc_blk_data per slot. | |
82 | */ | |
83 | struct mmc_blk_data { | |
84 | spinlock_t lock; | |
85 | struct gendisk *disk; | |
86 | struct mmc_queue queue; | |
371a689f | 87 | struct list_head part; |
1da177e4 | 88 | |
d0c97cfb AW |
89 | unsigned int flags; |
90 | #define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */ | |
91 | #define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */ | |
92 | ||
1da177e4 | 93 | unsigned int usage; |
a6f6c96b | 94 | unsigned int read_only; |
371a689f | 95 | unsigned int part_type; |
f06c9153 | 96 | unsigned int name_idx; |
371a689f AW |
97 | |
98 | /* | |
99 | * Only set in main mmc_blk_data associated | |
100 | * with mmc_card with mmc_set_drvdata, and keeps | |
101 | * track of the currently selected device partition. | |
102 | */ | |
103 | unsigned int part_curr; | |
104 | struct device_attribute force_ro; | |
1da177e4 LT |
105 | }; |
106 | ||
a621aaed | 107 | static DEFINE_MUTEX(open_lock); |
1da177e4 | 108 | |
d78d4a8a PF |
109 | enum mmc_blk_status { |
110 | MMC_BLK_SUCCESS = 0, | |
111 | MMC_BLK_PARTIAL, | |
112 | MMC_BLK_RETRY, | |
113 | MMC_BLK_RETRY_SINGLE, | |
114 | MMC_BLK_DATA_ERR, | |
115 | MMC_BLK_CMD_ERR, | |
116 | MMC_BLK_ABORT, | |
117 | }; | |
118 | ||
5e71b7a6 OJ |
119 | module_param(perdev_minors, int, 0444); |
120 | MODULE_PARM_DESC(perdev_minors, "Number of minors to allocate per device"); | |
121 | ||
1da177e4 LT |
122 | static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk) |
123 | { | |
124 | struct mmc_blk_data *md; | |
125 | ||
a621aaed | 126 | mutex_lock(&open_lock); |
1da177e4 LT |
127 | md = disk->private_data; |
128 | if (md && md->usage == 0) | |
129 | md = NULL; | |
130 | if (md) | |
131 | md->usage++; | |
a621aaed | 132 | mutex_unlock(&open_lock); |
1da177e4 LT |
133 | |
134 | return md; | |
135 | } | |
136 | ||
371a689f AW |
137 | static inline int mmc_get_devidx(struct gendisk *disk) |
138 | { | |
139 | int devmaj = MAJOR(disk_devt(disk)); | |
140 | int devidx = MINOR(disk_devt(disk)) / perdev_minors; | |
141 | ||
142 | if (!devmaj) | |
143 | devidx = disk->first_minor / perdev_minors; | |
144 | return devidx; | |
145 | } | |
146 | ||
1da177e4 LT |
147 | static void mmc_blk_put(struct mmc_blk_data *md) |
148 | { | |
a621aaed | 149 | mutex_lock(&open_lock); |
1da177e4 LT |
150 | md->usage--; |
151 | if (md->usage == 0) { | |
371a689f | 152 | int devidx = mmc_get_devidx(md->disk); |
5fa83ce2 AH |
153 | blk_cleanup_queue(md->queue.queue); |
154 | ||
1dff3144 DW |
155 | __clear_bit(devidx, dev_use); |
156 | ||
1da177e4 | 157 | put_disk(md->disk); |
1da177e4 LT |
158 | kfree(md); |
159 | } | |
a621aaed | 160 | mutex_unlock(&open_lock); |
1da177e4 LT |
161 | } |
162 | ||
371a689f AW |
163 | static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr, |
164 | char *buf) | |
165 | { | |
166 | int ret; | |
167 | struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); | |
168 | ||
169 | ret = snprintf(buf, PAGE_SIZE, "%d", | |
170 | get_disk_ro(dev_to_disk(dev)) ^ | |
171 | md->read_only); | |
172 | mmc_blk_put(md); | |
173 | return ret; | |
174 | } | |
175 | ||
176 | static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr, | |
177 | const char *buf, size_t count) | |
178 | { | |
179 | int ret; | |
180 | char *end; | |
181 | struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); | |
182 | unsigned long set = simple_strtoul(buf, &end, 0); | |
183 | if (end == buf) { | |
184 | ret = -EINVAL; | |
185 | goto out; | |
186 | } | |
187 | ||
188 | set_disk_ro(dev_to_disk(dev), set || md->read_only); | |
189 | ret = count; | |
190 | out: | |
191 | mmc_blk_put(md); | |
192 | return ret; | |
193 | } | |
194 | ||
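/*
 * Illustrative usage (editor's note): force_ro is registered per disk in
 * mmc_add_disk() below, so once a card is probed the attribute can be
 * toggled from user space, e.g.:
 *
 *	echo 1 > /sys/block/mmcblk0/force_ro
 *
 * A non-zero value marks the disk read-only on top of the card's own
 * write-protect state; the path assumes the default "mmcblk0" disk name.
 */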
a5a1561f | 195 | static int mmc_blk_open(struct block_device *bdev, fmode_t mode) |
1da177e4 | 196 | { |
a5a1561f | 197 | struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk); |
1da177e4 LT |
198 | int ret = -ENXIO; |
199 | ||
2a48fc0a | 200 | mutex_lock(&block_mutex); |
1da177e4 LT |
201 | if (md) { |
202 | if (md->usage == 2) | |
a5a1561f | 203 | check_disk_change(bdev); |
1da177e4 | 204 | ret = 0; |
a00fc090 | 205 | |
a5a1561f | 206 | if ((mode & FMODE_WRITE) && md->read_only) { |
70bb0896 | 207 | mmc_blk_put(md); |
a00fc090 | 208 | ret = -EROFS; |
70bb0896 | 209 | } |
1da177e4 | 210 | } |
2a48fc0a | 211 | mutex_unlock(&block_mutex); |
1da177e4 LT |
212 | |
213 | return ret; | |
214 | } | |
215 | ||
a5a1561f | 216 | static int mmc_blk_release(struct gendisk *disk, fmode_t mode) |
1da177e4 | 217 | { |
a5a1561f | 218 | struct mmc_blk_data *md = disk->private_data; |
1da177e4 | 219 | |
2a48fc0a | 220 | mutex_lock(&block_mutex); |
1da177e4 | 221 | mmc_blk_put(md); |
2a48fc0a | 222 | mutex_unlock(&block_mutex); |
1da177e4 LT |
223 | return 0; |
224 | } | |
225 | ||
226 | static int | |
a885c8c4 | 227 | mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) |
1da177e4 | 228 | { |
a885c8c4 CH |
229 | geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16); |
230 | geo->heads = 4; | |
231 | geo->sectors = 16; | |
232 | return 0; | |
1da177e4 LT |
233 | } |
234 | ||
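/*
 * Worked example (editor's note): the geometry reported above is purely
 * synthetic - 4 heads and 16 sectors per track give 64 sectors per
 * cylinder, so a card with 4194304 512-byte sectors (2 GiB) reports
 * 4194304 / 64 = 65536 cylinders.
 */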
cb87ea28 JC |
235 | struct mmc_blk_ioc_data { |
236 | struct mmc_ioc_cmd ic; | |
237 | unsigned char *buf; | |
238 | u64 buf_bytes; | |
239 | }; | |
240 | ||
241 | static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user( | |
242 | struct mmc_ioc_cmd __user *user) | |
243 | { | |
244 | struct mmc_blk_ioc_data *idata; | |
245 | int err; | |
246 | ||
247 | idata = kzalloc(sizeof(*idata), GFP_KERNEL); | |
248 | if (!idata) { | |
249 | err = -ENOMEM; | |
aea253ec | 250 | goto out; |
cb87ea28 JC |
251 | } |
252 | ||
253 | if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) { | |
254 | err = -EFAULT; | |
aea253ec | 255 | goto idata_err; |
cb87ea28 JC |
256 | } |
257 | ||
258 | idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks; | |
259 | if (idata->buf_bytes > MMC_IOC_MAX_BYTES) { | |
260 | err = -EOVERFLOW; | |
aea253ec | 261 | goto idata_err; |
cb87ea28 JC |
262 | } |
263 | ||
264 | idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL); | |
265 | if (!idata->buf) { | |
266 | err = -ENOMEM; | |
aea253ec | 267 | goto idata_err; |
cb87ea28 JC |
268 | } |
269 | ||
270 | if (copy_from_user(idata->buf, (void __user *)(unsigned long) | |
271 | idata->ic.data_ptr, idata->buf_bytes)) { | |
272 | err = -EFAULT; | |
273 | goto copy_err; | |
274 | } | |
275 | ||
276 | return idata; | |
277 | ||
278 | copy_err: | |
279 | kfree(idata->buf); | |
aea253ec | 280 | idata_err: |
cb87ea28 | 281 | kfree(idata); |
aea253ec | 282 | out: |
cb87ea28 | 283 | return ERR_PTR(err); |
cb87ea28 JC |
284 | } |
285 | ||
286 | static int mmc_blk_ioctl_cmd(struct block_device *bdev, | |
287 | struct mmc_ioc_cmd __user *ic_ptr) | |
288 | { | |
289 | struct mmc_blk_ioc_data *idata; | |
290 | struct mmc_blk_data *md; | |
291 | struct mmc_card *card; | |
292 | struct mmc_command cmd = {0}; | |
293 | struct mmc_data data = {0}; | |
294 | struct mmc_request mrq = {0}; | |
295 | struct scatterlist sg; | |
296 | int err; | |
297 | ||
298 | /* | |
299 | * The caller must have CAP_SYS_RAWIO, and must be calling this on the | |
300 | * whole block device, not on a partition. This prevents overspray | |
301 | * between sibling partitions. | |
302 | */ | |
303 | if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains)) | |
304 | return -EPERM; | |
305 | ||
306 | idata = mmc_blk_ioctl_copy_from_user(ic_ptr); | |
307 | if (IS_ERR(idata)) | |
308 | return PTR_ERR(idata); | |
309 | ||
310 | cmd.opcode = idata->ic.opcode; | |
311 | cmd.arg = idata->ic.arg; | |
312 | cmd.flags = idata->ic.flags; | |
313 | ||
314 | data.sg = &sg; | |
315 | data.sg_len = 1; | |
316 | data.blksz = idata->ic.blksz; | |
317 | data.blocks = idata->ic.blocks; | |
318 | ||
319 | sg_init_one(data.sg, idata->buf, idata->buf_bytes); | |
320 | ||
321 | if (idata->ic.write_flag) | |
322 | data.flags = MMC_DATA_WRITE; | |
323 | else | |
324 | data.flags = MMC_DATA_READ; | |
325 | ||
326 | mrq.cmd = &cmd; | |
327 | mrq.data = &data; | |
328 | ||
329 | md = mmc_blk_get(bdev->bd_disk); | |
330 | if (!md) { | |
331 | err = -EINVAL; | |
332 | goto cmd_done; | |
333 | } | |
334 | ||
335 | card = md->queue.card; | |
336 | if (IS_ERR(card)) { | |
337 | err = PTR_ERR(card); | |
338 | goto cmd_done; | |
339 | } | |
340 | ||
341 | mmc_claim_host(card->host); | |
342 | ||
343 | if (idata->ic.is_acmd) { | |
344 | err = mmc_app_cmd(card->host, card); | |
345 | if (err) | |
346 | goto cmd_rel_host; | |
347 | } | |
348 | ||
349 | /* data.flags must already be set before doing this. */ | |
350 | mmc_set_data_timeout(&data, card); | |
351 | /* Allow overriding the timeout_ns for empirical tuning. */ | |
352 | if (idata->ic.data_timeout_ns) | |
353 | data.timeout_ns = idata->ic.data_timeout_ns; | |
354 | ||
355 | if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) { | |
356 | /* | |
357 | * Pretend this is a data transfer and rely on the host driver | |
358 | * to compute timeout. When all host drivers support | |
359 | * cmd.cmd_timeout for R1B, this can be changed to: | |
360 | * | |
361 | * mrq.data = NULL; | |
362 | * cmd.cmd_timeout = idata->ic.cmd_timeout_ms; | |
363 | */ | |
364 | data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000; | |
365 | } | |
366 | ||
367 | mmc_wait_for_req(card->host, &mrq); | |
368 | ||
369 | if (cmd.error) { | |
370 | dev_err(mmc_dev(card->host), "%s: cmd error %d\n", | |
371 | __func__, cmd.error); | |
372 | err = cmd.error; | |
373 | goto cmd_rel_host; | |
374 | } | |
375 | if (data.error) { | |
376 | dev_err(mmc_dev(card->host), "%s: data error %d\n", | |
377 | __func__, data.error); | |
378 | err = data.error; | |
379 | goto cmd_rel_host; | |
380 | } | |
381 | ||
382 | /* | |
383 | * According to the SD specs, some commands require a delay after | |
384 | * issuing the command. | |
385 | */ | |
386 | if (idata->ic.postsleep_min_us) | |
387 | usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us); | |
388 | ||
389 | if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) { | |
390 | err = -EFAULT; | |
391 | goto cmd_rel_host; | |
392 | } | |
393 | ||
394 | if (!idata->ic.write_flag) { | |
395 | if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr, | |
396 | idata->buf, idata->buf_bytes)) { | |
397 | err = -EFAULT; | |
398 | goto cmd_rel_host; | |
399 | } | |
400 | } | |
401 | ||
402 | cmd_rel_host: | |
403 | mmc_release_host(card->host); | |
404 | ||
405 | cmd_done: | |
406 | mmc_blk_put(md); | |
407 | kfree(idata->buf); | |
408 | kfree(idata); | |
409 | return err; | |
410 | } | |
411 | ||
412 | static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode, | |
413 | unsigned int cmd, unsigned long arg) | |
414 | { | |
415 | int ret = -EINVAL; | |
416 | if (cmd == MMC_IOC_CMD) | |
417 | ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg); | |
418 | return ret; | |
419 | } | |
420 | ||
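/*
 * Illustrative user-space usage (editor's note, not part of the driver):
 * a process holding CAP_SYS_RAWIO fills a struct mmc_ioc_cmd from
 * <linux/mmc/ioctl.h> and issues MMC_IOC_CMD on the whole-device node
 * (e.g. /dev/mmcblk0, not on a partition). A sketch for reading the eMMC
 * EXT_CSD register (CMD8, one 512-byte read) might look like:
 *
 *	struct mmc_ioc_cmd ic = {0};
 *	__u8 ext_csd[512];
 *
 *	ic.opcode = MMC_SEND_EXT_CSD;
 *	ic.blksz = 512;
 *	ic.blocks = 1;
 *	ic.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
 *	ic.write_flag = 0;
 *	mmc_ioc_cmd_set_data(ic, ext_csd);
 *	ioctl(fd, MMC_IOC_CMD, &ic);
 *
 * The opcode and flag constants shown come from the kernel's mmc headers
 * and are used here purely for illustration; a real user program would
 * carry its own copies of those values.
 */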
421 | #ifdef CONFIG_COMPAT | |
422 | static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode, | |
423 | unsigned int cmd, unsigned long arg) | |
424 | { | |
425 | return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg)); | |
426 | } | |
427 | #endif | |
428 | ||
83d5cde4 | 429 | static const struct block_device_operations mmc_bdops = { |
a5a1561f AV |
430 | .open = mmc_blk_open, |
431 | .release = mmc_blk_release, | |
a885c8c4 | 432 | .getgeo = mmc_blk_getgeo, |
1da177e4 | 433 | .owner = THIS_MODULE, |
cb87ea28 JC |
434 | .ioctl = mmc_blk_ioctl, |
435 | #ifdef CONFIG_COMPAT | |
436 | .compat_ioctl = mmc_blk_compat_ioctl, | |
437 | #endif | |
1da177e4 LT |
438 | }; |
439 | ||
371a689f AW |
440 | static inline int mmc_blk_part_switch(struct mmc_card *card, |
441 | struct mmc_blk_data *md) | |
442 | { | |
443 | int ret; | |
444 | struct mmc_blk_data *main_md = mmc_get_drvdata(card); | |
445 | if (main_md->part_curr == md->part_type) | |
446 | return 0; | |
447 | ||
448 | if (mmc_card_mmc(card)) { | |
449 | card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK; | |
450 | card->ext_csd.part_config |= md->part_type; | |
451 | ||
452 | ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | |
453 | EXT_CSD_PART_CONFIG, card->ext_csd.part_config, | |
454 | card->ext_csd.part_time); | |
455 | if (ret) | |
456 | return ret; | |
457 | } | |
458 | ||
459 | main_md->part_curr = md->part_type; | |
460 | return 0; | |
461 | } | |
462 | ||
ec5a19dd PO |
463 | static u32 mmc_sd_num_wr_blocks(struct mmc_card *card) |
464 | { | |
465 | int err; | |
051913da BD |
466 | u32 result; |
467 | __be32 *blocks; | |
ec5a19dd | 468 | |
24f5b53b | 469 | struct mmc_request mrq = {0}; |
1278dba1 | 470 | struct mmc_command cmd = {0}; |
a61ad2b4 | 471 | struct mmc_data data = {0}; |
ec5a19dd PO |
472 | unsigned int timeout_us; |
473 | ||
474 | struct scatterlist sg; | |
475 | ||
ec5a19dd PO |
476 | cmd.opcode = MMC_APP_CMD; |
477 | cmd.arg = card->rca << 16; | |
7213d175 | 478 | cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; |
ec5a19dd PO |
479 | |
480 | err = mmc_wait_for_cmd(card->host, &cmd, 0); | |
7213d175 DB |
481 | if (err) |
482 | return (u32)-1; | |
483 | if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD)) | |
ec5a19dd PO |
484 | return (u32)-1; |
485 | ||
486 | memset(&cmd, 0, sizeof(struct mmc_command)); | |
487 | ||
488 | cmd.opcode = SD_APP_SEND_NUM_WR_BLKS; | |
489 | cmd.arg = 0; | |
7213d175 | 490 | cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; |
ec5a19dd | 491 | |
ec5a19dd PO |
492 | data.timeout_ns = card->csd.tacc_ns * 100; |
493 | data.timeout_clks = card->csd.tacc_clks * 100; | |
494 | ||
495 | timeout_us = data.timeout_ns / 1000; | |
496 | timeout_us += data.timeout_clks * 1000 / | |
497 | (card->host->ios.clock / 1000); | |
498 | ||
499 | if (timeout_us > 100000) { | |
500 | data.timeout_ns = 100000000; | |
501 | data.timeout_clks = 0; | |
502 | } | |
503 | ||
504 | data.blksz = 4; | |
505 | data.blocks = 1; | |
506 | data.flags = MMC_DATA_READ; | |
507 | data.sg = &sg; | |
508 | data.sg_len = 1; | |
509 | ||
ec5a19dd PO |
510 | mrq.cmd = &cmd; |
511 | mrq.data = &data; | |
512 | ||
051913da BD |
513 | blocks = kmalloc(4, GFP_KERNEL); |
514 | if (!blocks) | |
515 | return (u32)-1; | |
516 | ||
517 | sg_init_one(&sg, blocks, 4); | |
ec5a19dd PO |
518 | |
519 | mmc_wait_for_req(card->host, &mrq); | |
520 | ||
051913da BD |
521 | result = ntohl(*blocks); |
522 | kfree(blocks); | |
523 | ||
17b0429d | 524 | if (cmd.error || data.error) |
051913da | 525 | result = (u32)-1; |
ec5a19dd | 526 | |
051913da | 527 | return result; |
ec5a19dd PO |
528 | } |
529 | ||
a01f3ccf RKAL |
530 | static int send_stop(struct mmc_card *card, u32 *status) |
531 | { | |
532 | struct mmc_command cmd = {0}; | |
533 | int err; | |
534 | ||
535 | cmd.opcode = MMC_STOP_TRANSMISSION; | |
536 | cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; | |
537 | err = mmc_wait_for_cmd(card->host, &cmd, 5); | |
538 | if (err == 0) | |
539 | *status = cmd.resp[0]; | |
540 | return err; | |
541 | } | |
542 | ||
0a2d4048 | 543 | static int get_card_status(struct mmc_card *card, u32 *status, int retries) |
504f191f | 544 | { |
1278dba1 | 545 | struct mmc_command cmd = {0}; |
504f191f AH |
546 | int err; |
547 | ||
504f191f AH |
548 | cmd.opcode = MMC_SEND_STATUS; |
549 | if (!mmc_host_is_spi(card->host)) | |
550 | cmd.arg = card->rca << 16; | |
551 | cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; | |
0a2d4048 RKAL |
552 | err = mmc_wait_for_cmd(card->host, &cmd, retries); |
553 | if (err == 0) | |
554 | *status = cmd.resp[0]; | |
555 | return err; | |
504f191f AH |
556 | } |
557 | ||
a01f3ccf RKAL |
558 | #define ERR_RETRY 2 |
559 | #define ERR_ABORT 1 | |
560 | #define ERR_CONTINUE 0 | |
561 | ||
562 | static int mmc_blk_cmd_error(struct request *req, const char *name, int error, | |
563 | bool status_valid, u32 status) | |
564 | { | |
565 | switch (error) { | |
566 | case -EILSEQ: | |
567 | /* response crc error, retry the r/w cmd */ | |
568 | pr_err("%s: %s sending %s command, card status %#x\n", | |
569 | req->rq_disk->disk_name, "response CRC error", | |
570 | name, status); | |
571 | return ERR_RETRY; | |
572 | ||
573 | case -ETIMEDOUT: | |
574 | pr_err("%s: %s sending %s command, card status %#x\n", | |
575 | req->rq_disk->disk_name, "timed out", name, status); | |
576 | ||
577 | /* If the status cmd initially failed, retry the r/w cmd */ | |
578 | if (!status_valid) | |
579 | return ERR_RETRY; | |
580 | ||
581 | /* | |
582 | * If it was an r/w cmd CRC error, or an illegal command | |
583 | * (e.g., issued in the wrong state), then retry - we should | |
584 | * have corrected the state problem above. | |
585 | */ | |
586 | if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) | |
587 | return ERR_RETRY; | |
588 | ||
589 | /* Otherwise abort the command */ | |
590 | return ERR_ABORT; | |
591 | ||
592 | default: | |
593 | /* We don't understand the error code the driver gave us */ | |
594 | pr_err("%s: unknown error %d sending read/write command, card status %#x\n", | |
595 | req->rq_disk->disk_name, error, status); | |
596 | return ERR_ABORT; | |
597 | } | |
598 | } | |
599 | ||
600 | /* | |
601 | * Initial r/w and stop cmd error recovery. | |
602 | * We don't know whether the card received the r/w cmd or not, so try to | |
603 | * restore things back to a sane state. Essentially, we do this as follows: | |
604 | * - Obtain card status. If the first attempt to obtain card status fails, | |
605 | * the status word will reflect the failed status cmd, not the failed | |
606 | * r/w cmd. If we fail to obtain card status, it suggests we can no | |
607 | * longer communicate with the card. | |
608 | * - Check the card state. If the card received the cmd but there was a | |
609 | * transient problem with the response, it might still be in a data transfer | |
610 | * mode. Try to send it a stop command. If this fails, we can't recover. | |
611 | * - If the r/w cmd failed due to a response CRC error, it was probably | |
612 | * transient, so retry the cmd. | |
613 | * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry. | |
614 | * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or | |
615 | * illegal cmd, retry. | |
616 | * Otherwise we don't understand what happened, so abort. | |
617 | */ | |
618 | static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req, | |
619 | struct mmc_blk_request *brq) | |
620 | { | |
621 | bool prev_cmd_status_valid = true; | |
622 | u32 status, stop_status = 0; | |
623 | int err, retry; | |
624 | ||
625 | /* | |
626 | * Try to get card status which indicates both the card state | |
627 | * and why there was no response. If the first attempt fails, | |
628 | * we can't be sure the returned status is for the r/w command. | |
629 | */ | |
630 | for (retry = 2; retry >= 0; retry--) { | |
631 | err = get_card_status(card, &status, 0); | |
632 | if (!err) | |
633 | break; | |
634 | ||
635 | prev_cmd_status_valid = false; | |
636 | pr_err("%s: error %d sending status command, %sing\n", | |
637 | req->rq_disk->disk_name, err, retry ? "retry" : "abort"); | |
638 | } | |
639 | ||
640 | /* We couldn't get a response from the card. Give up. */ | |
641 | if (err) | |
642 | return ERR_ABORT; | |
643 | ||
644 | /* | |
645 | * Check the current card state. If it is in some data transfer | |
646 | * mode, tell it to stop (and hopefully transition back to TRAN.) | |
647 | */ | |
648 | if (R1_CURRENT_STATE(status) == R1_STATE_DATA || | |
649 | R1_CURRENT_STATE(status) == R1_STATE_RCV) { | |
650 | err = send_stop(card, &stop_status); | |
651 | if (err) | |
652 | pr_err("%s: error %d sending stop command\n", | |
653 | req->rq_disk->disk_name, err); | |
654 | ||
655 | /* | |
656 | * If the stop cmd also timed out, the card is probably | |
657 | * not present, so abort. Other errors are bad news too. | |
658 | */ | |
659 | if (err) | |
660 | return ERR_ABORT; | |
661 | } | |
662 | ||
663 | /* Check for set block count errors */ | |
664 | if (brq->sbc.error) | |
665 | return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error, | |
666 | prev_cmd_status_valid, status); | |
667 | ||
668 | /* Check for r/w command errors */ | |
669 | if (brq->cmd.error) | |
670 | return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error, | |
671 | prev_cmd_status_valid, status); | |
672 | ||
673 | /* Now for stop errors. These aren't fatal to the transfer. */ | |
674 | pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n", | |
675 | req->rq_disk->disk_name, brq->stop.error, | |
676 | brq->cmd.resp[0], status); | |
677 | ||
678 | /* | |
679 | * Substitute in our own stop status as this will give the error | |
680 | * state which happened during the execution of the r/w command. | |
681 | */ | |
682 | if (stop_status) { | |
683 | brq->stop.resp[0] = stop_status; | |
684 | brq->stop.error = 0; | |
685 | } | |
686 | return ERR_CONTINUE; | |
687 | } | |
688 | ||
bd788c96 AH |
689 | static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) |
690 | { | |
691 | struct mmc_blk_data *md = mq->data; | |
692 | struct mmc_card *card = md->queue.card; | |
693 | unsigned int from, nr, arg; | |
694 | int err = 0; | |
695 | ||
bd788c96 AH |
696 | if (!mmc_can_erase(card)) { |
697 | err = -EOPNOTSUPP; | |
698 | goto out; | |
699 | } | |
700 | ||
701 | from = blk_rq_pos(req); | |
702 | nr = blk_rq_sectors(req); | |
703 | ||
704 | if (mmc_can_trim(card)) | |
705 | arg = MMC_TRIM_ARG; | |
706 | else | |
707 | arg = MMC_ERASE_ARG; | |
708 | ||
6a7a6b45 AW |
709 | if (card->quirks & MMC_QUIRK_INAND_CMD38) { |
710 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | |
711 | INAND_CMD38_ARG_EXT_CSD, | |
712 | arg == MMC_TRIM_ARG ? | |
713 | INAND_CMD38_ARG_TRIM : | |
714 | INAND_CMD38_ARG_ERASE, | |
715 | 0); | |
716 | if (err) | |
717 | goto out; | |
718 | } | |
bd788c96 AH |
719 | err = mmc_erase(card, from, nr, arg); |
720 | out: | |
721 | spin_lock_irq(&md->lock); | |
722 | __blk_end_request(req, err, blk_rq_bytes(req)); | |
723 | spin_unlock_irq(&md->lock); | |
724 | ||
bd788c96 AH |
725 | return err ? 0 : 1; |
726 | } | |
727 | ||
49804548 AH |
728 | static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, |
729 | struct request *req) | |
730 | { | |
731 | struct mmc_blk_data *md = mq->data; | |
732 | struct mmc_card *card = md->queue.card; | |
733 | unsigned int from, nr, arg; | |
734 | int err = 0; | |
735 | ||
49804548 AH |
736 | if (!mmc_can_secure_erase_trim(card)) { |
737 | err = -EOPNOTSUPP; | |
738 | goto out; | |
739 | } | |
740 | ||
741 | from = blk_rq_pos(req); | |
742 | nr = blk_rq_sectors(req); | |
743 | ||
744 | if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr)) | |
745 | arg = MMC_SECURE_TRIM1_ARG; | |
746 | else | |
747 | arg = MMC_SECURE_ERASE_ARG; | |
748 | ||
6a7a6b45 AW |
749 | if (card->quirks & MMC_QUIRK_INAND_CMD38) { |
750 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | |
751 | INAND_CMD38_ARG_EXT_CSD, | |
752 | arg == MMC_SECURE_TRIM1_ARG ? | |
753 | INAND_CMD38_ARG_SECTRIM1 : | |
754 | INAND_CMD38_ARG_SECERASE, | |
755 | 0); | |
756 | if (err) | |
757 | goto out; | |
758 | } | |
49804548 | 759 | err = mmc_erase(card, from, nr, arg); |
6a7a6b45 AW |
760 | if (!err && arg == MMC_SECURE_TRIM1_ARG) { |
761 | if (card->quirks & MMC_QUIRK_INAND_CMD38) { | |
762 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | |
763 | INAND_CMD38_ARG_EXT_CSD, | |
764 | INAND_CMD38_ARG_SECTRIM2, | |
765 | 0); | |
766 | if (err) | |
767 | goto out; | |
768 | } | |
49804548 | 769 | err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG); |
6a7a6b45 | 770 | } |
49804548 AH |
771 | out: |
772 | spin_lock_irq(&md->lock); | |
773 | __blk_end_request(req, err, blk_rq_bytes(req)); | |
774 | spin_unlock_irq(&md->lock); | |
775 | ||
49804548 AH |
776 | return err ? 0 : 1; |
777 | } | |
778 | ||
f4c5522b AW |
779 | static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req) |
780 | { | |
781 | struct mmc_blk_data *md = mq->data; | |
782 | ||
783 | /* | |
784 | * No-op, only service this because we need REQ_FUA for reliable | |
785 | * writes. | |
786 | */ | |
787 | spin_lock_irq(&md->lock); | |
788 | __blk_end_request_all(req, 0); | |
789 | spin_unlock_irq(&md->lock); | |
790 | ||
791 | return 1; | |
792 | } | |
793 | ||
794 | /* | |
795 | * Reformat current write as a reliable write, supporting | |
796 | * both legacy and the enhanced reliable write MMC cards. | |
797 | * In each transfer we'll handle only as much as a single | |
798 | * reliable write can handle, thus finishing the request in | |
799 | * partial completions. | |
800 | */ | |
d0c97cfb AW |
801 | static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq, |
802 | struct mmc_card *card, | |
803 | struct request *req) | |
f4c5522b | 804 | { |
f4c5522b AW |
805 | if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) { |
806 | /* Legacy mode imposes restrictions on transfers. */ | |
807 | if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors)) | |
808 | brq->data.blocks = 1; | |
809 | ||
810 | if (brq->data.blocks > card->ext_csd.rel_sectors) | |
811 | brq->data.blocks = card->ext_csd.rel_sectors; | |
812 | else if (brq->data.blocks < card->ext_csd.rel_sectors) | |
813 | brq->data.blocks = 1; | |
814 | } | |
f4c5522b AW |
815 | } |
816 | ||
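/*
 * Worked example (editor's note): assuming a legacy-mode card with
 * rel_sectors == 8, an aligned 15-sector reliable write is first clamped
 * to 8 sectors; the remaining 7 sectors are re-prepared by the
 * partial-completion loop and, being smaller than rel_sectors, go out as
 * single-sector writes.
 */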
4c2b8f26 RKAL |
817 | #define CMD_ERRORS \ |
818 | (R1_OUT_OF_RANGE | /* Command argument out of range */ \ | |
819 | R1_ADDRESS_ERROR | /* Misaligned address */ \ | |
820 | R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\ | |
821 | R1_WP_VIOLATION | /* Tried to write to protected block */ \ | |
822 | R1_CC_ERROR | /* Card controller error */ \ | |
823 | R1_ERROR) /* General/unknown error */ | |
824 | ||
ee8a43a5 PF |
825 | static int mmc_blk_err_check(struct mmc_card *card, |
826 | struct mmc_async_req *areq) | |
d78d4a8a | 827 | { |
ee8a43a5 PF |
828 | enum mmc_blk_status ret = MMC_BLK_SUCCESS; |
829 | struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req, | |
830 | mmc_active); | |
831 | struct mmc_blk_request *brq = &mq_mrq->brq; | |
832 | struct request *req = mq_mrq->req; | |
d78d4a8a PF |
833 | |
834 | /* | |
835 | * sbc.error indicates a problem with the set block count | |
836 | * command. No data will have been transferred. | |
837 | * | |
838 | * cmd.error indicates a problem with the r/w command. No | |
839 | * data will have been transferred. | |
840 | * | |
841 | * stop.error indicates a problem with the stop command. Data | |
842 | * may have been transferred, or may still be transferring. | |
843 | */ | |
844 | if (brq->sbc.error || brq->cmd.error || brq->stop.error) { | |
845 | switch (mmc_blk_cmd_recovery(card, req, brq)) { | |
846 | case ERR_RETRY: | |
847 | return MMC_BLK_RETRY; | |
848 | case ERR_ABORT: | |
849 | return MMC_BLK_ABORT; | |
850 | case ERR_CONTINUE: | |
851 | break; | |
852 | } | |
853 | } | |
854 | ||
855 | /* | |
856 | * Check for errors relating to the execution of the | |
857 | * initial command - such as address errors. No data | |
858 | * has been transferred. | |
859 | */ | |
860 | if (brq->cmd.resp[0] & CMD_ERRORS) { | |
861 | pr_err("%s: r/w command failed, status = %#x\n", | |
862 | req->rq_disk->disk_name, brq->cmd.resp[0]); | |
863 | return MMC_BLK_ABORT; | |
864 | } | |
865 | ||
866 | /* | |
867 | * Everything else is either success, or a data error of some | |
868 | * kind. If it was a write, we may have transitioned to | |
869 | * program mode, which we have to wait for to complete. | |
870 | */ | |
871 | if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) { | |
872 | u32 status; | |
873 | do { | |
874 | int err = get_card_status(card, &status, 5); | |
875 | if (err) { | |
876 | printk(KERN_ERR "%s: error %d requesting status\n", | |
877 | req->rq_disk->disk_name, err); | |
878 | return MMC_BLK_CMD_ERR; | |
879 | } | |
880 | /* | |
881 | * Some cards mishandle the status bits, | |
882 | * so make sure to check both the busy | |
883 | * indication and the card state. | |
884 | */ | |
885 | } while (!(status & R1_READY_FOR_DATA) || | |
886 | (R1_CURRENT_STATE(status) == R1_STATE_PRG)); | |
887 | } | |
888 | ||
889 | if (brq->data.error) { | |
890 | pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n", | |
891 | req->rq_disk->disk_name, brq->data.error, | |
892 | (unsigned)blk_rq_pos(req), | |
893 | (unsigned)blk_rq_sectors(req), | |
894 | brq->cmd.resp[0], brq->stop.resp[0]); | |
895 | ||
896 | if (rq_data_dir(req) == READ) { | |
897 | if (brq->data.blocks > 1) { | |
898 | /* Redo read one sector at a time */ | |
899 | pr_warning("%s: retrying using single block read\n", | |
900 | req->rq_disk->disk_name); | |
901 | return MMC_BLK_RETRY_SINGLE; | |
902 | } | |
903 | return MMC_BLK_DATA_ERR; | |
904 | } else { | |
905 | return MMC_BLK_CMD_ERR; | |
906 | } | |
907 | } | |
908 | ||
909 | if (ret == MMC_BLK_SUCCESS && | |
910 | blk_rq_bytes(req) != brq->data.bytes_xfered) | |
911 | ret = MMC_BLK_PARTIAL; | |
912 | ||
913 | return ret; | |
914 | } | |
915 | ||
54d49d77 PF |
916 | static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, |
917 | struct mmc_card *card, | |
918 | int disable_multi, | |
919 | struct mmc_queue *mq) | |
1da177e4 | 920 | { |
54d49d77 PF |
921 | u32 readcmd, writecmd; |
922 | struct mmc_blk_request *brq = &mqrq->brq; | |
923 | struct request *req = mqrq->req; | |
1da177e4 | 924 | struct mmc_blk_data *md = mq->data; |
1da177e4 | 925 | |
f4c5522b AW |
926 | /* |
927 | * Reliable writes are used to implement Forced Unit Access and | |
928 | * REQ_META accesses, and are supported only on MMCs. | |
65299a3b CH |
929 | * |
930 | * XXX: this really needs a good explanation of why REQ_META | |
931 | * is treated specially. | |
f4c5522b AW |
932 | */ |
933 | bool do_rel_wr = ((req->cmd_flags & REQ_FUA) || | |
934 | (req->cmd_flags & REQ_META)) && | |
935 | (rq_data_dir(req) == WRITE) && | |
d0c97cfb | 936 | (md->flags & MMC_BLK_REL_WR); |
f4c5522b | 937 | |
54d49d77 PF |
938 | memset(brq, 0, sizeof(struct mmc_blk_request)); |
939 | brq->mrq.cmd = &brq->cmd; | |
940 | brq->mrq.data = &brq->data; | |
1da177e4 | 941 | |
54d49d77 PF |
942 | brq->cmd.arg = blk_rq_pos(req); |
943 | if (!mmc_card_blockaddr(card)) | |
944 | brq->cmd.arg <<= 9; | |
945 | brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; | |
946 | brq->data.blksz = 512; | |
947 | brq->stop.opcode = MMC_STOP_TRANSMISSION; | |
948 | brq->stop.arg = 0; | |
949 | brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; | |
950 | brq->data.blocks = blk_rq_sectors(req); | |
6a79e391 | 951 | |
54d49d77 PF |
952 | /* |
953 | * The block layer doesn't support all sector count | |
954 | * restrictions, so we need to be prepared for too big | |
955 | * requests. | |
956 | */ | |
957 | if (brq->data.blocks > card->host->max_blk_count) | |
958 | brq->data.blocks = card->host->max_blk_count; | |
1da177e4 | 959 | |
54d49d77 PF |
960 | /* |
961 | * After a read error, we redo the request one sector at a time | |
962 | * in order to accurately determine which sectors can be read | |
963 | * successfully. | |
964 | */ | |
965 | if (disable_multi && brq->data.blocks > 1) | |
966 | brq->data.blocks = 1; | |
d0c97cfb | 967 | |
54d49d77 PF |
968 | if (brq->data.blocks > 1 || do_rel_wr) { |
969 | /* SPI multiblock writes terminate using a special | |
970 | * token, not a STOP_TRANSMISSION request. | |
d0c97cfb | 971 | */ |
54d49d77 PF |
972 | if (!mmc_host_is_spi(card->host) || |
973 | rq_data_dir(req) == READ) | |
974 | brq->mrq.stop = &brq->stop; | |
975 | readcmd = MMC_READ_MULTIPLE_BLOCK; | |
976 | writecmd = MMC_WRITE_MULTIPLE_BLOCK; | |
977 | } else { | |
978 | brq->mrq.stop = NULL; | |
979 | readcmd = MMC_READ_SINGLE_BLOCK; | |
980 | writecmd = MMC_WRITE_BLOCK; | |
981 | } | |
982 | if (rq_data_dir(req) == READ) { | |
983 | brq->cmd.opcode = readcmd; | |
984 | brq->data.flags |= MMC_DATA_READ; | |
985 | } else { | |
986 | brq->cmd.opcode = writecmd; | |
987 | brq->data.flags |= MMC_DATA_WRITE; | |
988 | } | |
d0c97cfb | 989 | |
54d49d77 PF |
990 | if (do_rel_wr) |
991 | mmc_apply_rel_rw(brq, card, req); | |
f4c5522b | 992 | |
54d49d77 PF |
993 | /* |
994 | * Pre-defined multi-block transfers are preferable to | |
995 | * open-ended ones (and necessary for reliable writes). | |
996 | * However, it is not sufficient to just send CMD23, | |
997 | * and avoid the final CMD12, as on an error condition | |
998 | * CMD12 (stop) needs to be sent anyway. This, coupled | |
999 | * with Auto-CMD23 enhancements provided by some | |
1000 | * hosts, means that the complexity of dealing | |
1001 | * with this is best left to the host. If CMD23 is | |
1002 | * supported by card and host, we'll fill sbc in and let | |
1003 | * the host deal with handling it correctly. This means | |
1004 | * that for hosts that don't expose MMC_CAP_CMD23, no | |
1005 | * change of behavior will be observed. | |
1006 | * | |
1007 | * N.B.: Some MMC cards experience performance degradation. | |
1008 | * We'll avoid using CMD23-bounded multiblock writes for | |
1009 | * these, while retaining features like reliable writes. | |
1010 | */ | |
b146d26a | 1011 | |
54d49d77 PF |
1012 | if ((md->flags & MMC_BLK_CMD23) && |
1013 | mmc_op_multi(brq->cmd.opcode) && | |
1014 | (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) { | |
1015 | brq->sbc.opcode = MMC_SET_BLOCK_COUNT; | |
1016 | brq->sbc.arg = brq->data.blocks | | |
1017 | (do_rel_wr ? (1 << 31) : 0); | |
1018 | brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; | |
1019 | brq->mrq.sbc = &brq->sbc; | |
1020 | } | |
98ccf149 | 1021 | |
54d49d77 PF |
1022 | mmc_set_data_timeout(&brq->data, card); |
1023 | ||
1024 | brq->data.sg = mqrq->sg; | |
1025 | brq->data.sg_len = mmc_queue_map_sg(mq, mqrq); | |
1026 | ||
1027 | /* | |
1028 | * Adjust the sg list so it is the same size as the | |
1029 | * request. | |
1030 | */ | |
1031 | if (brq->data.blocks != blk_rq_sectors(req)) { | |
1032 | int i, data_size = brq->data.blocks << 9; | |
1033 | struct scatterlist *sg; | |
1034 | ||
1035 | for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) { | |
1036 | data_size -= sg->length; | |
1037 | if (data_size <= 0) { | |
1038 | sg->length += data_size; | |
1039 | i++; | |
1040 | break; | |
6a79e391 | 1041 | } |
6a79e391 | 1042 | } |
54d49d77 PF |
1043 | brq->data.sg_len = i; |
1044 | } | |
1045 | ||
ee8a43a5 PF |
1046 | mqrq->mmc_active.mrq = &brq->mrq; |
1047 | mqrq->mmc_active.err_check = mmc_blk_err_check; | |
1048 | ||
54d49d77 PF |
1049 | mmc_queue_bounce_pre(mqrq); |
1050 | } | |
6a79e391 | 1051 | |
ee8a43a5 | 1052 | static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) |
54d49d77 PF |
1053 | { |
1054 | struct mmc_blk_data *md = mq->data; | |
1055 | struct mmc_card *card = md->queue.card; | |
1056 | struct mmc_blk_request *brq = &mq->mqrq_cur->brq; | |
1057 | int ret = 1, disable_multi = 0, retry = 0; | |
d78d4a8a | 1058 | enum mmc_blk_status status; |
ee8a43a5 PF |
1059 | struct mmc_queue_req *mq_rq; |
1060 | struct request *req; | |
1061 | struct mmc_async_req *areq; | |
1da177e4 | 1062 | |
ee8a43a5 PF |
1063 | if (!rqc && !mq->mqrq_prev->req) |
1064 | return 0; | |
98ccf149 | 1065 | |
ee8a43a5 PF |
1066 | do { |
1067 | if (rqc) { | |
1068 | mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); | |
1069 | areq = &mq->mqrq_cur->mmc_active; | |
1070 | } else | |
1071 | areq = NULL; | |
1072 | areq = mmc_start_req(card->host, areq, (int *) &status); | |
1073 | if (!areq) | |
1074 | return 0; | |
1075 | ||
1076 | mq_rq = container_of(areq, struct mmc_queue_req, mmc_active); | |
1077 | brq = &mq_rq->brq; | |
1078 | req = mq_rq->req; | |
1079 | mmc_queue_bounce_post(mq_rq); | |
98ccf149 | 1080 | |
d78d4a8a PF |
1081 | switch (status) { |
1082 | case MMC_BLK_SUCCESS: | |
1083 | case MMC_BLK_PARTIAL: | |
1084 | /* | |
1085 | * A block was successfully transferred. | |
1086 | */ | |
1087 | spin_lock_irq(&md->lock); | |
1088 | ret = __blk_end_request(req, 0, | |
1089 | brq->data.bytes_xfered); | |
1090 | spin_unlock_irq(&md->lock); | |
ee8a43a5 PF |
1091 | if (status == MMC_BLK_SUCCESS && ret) { |
1092 | /* | |
1093 | * The blk_end_request has returned non-zero | |
1094 | * even though all data was transferred and no | |
1095 | * errors were returned by the host. | |
1096 | * If this happens, it's a bug. | |
1097 | */ | |
1098 | printk(KERN_ERR "%s BUG rq_tot %d d_xfer %d\n", | |
1099 | __func__, blk_rq_bytes(req), | |
1100 | brq->data.bytes_xfered); | |
1101 | rqc = NULL; | |
1102 | goto cmd_abort; | |
1103 | } | |
d78d4a8a PF |
1104 | break; |
1105 | case MMC_BLK_CMD_ERR: | |
1106 | goto cmd_err; | |
1107 | case MMC_BLK_RETRY_SINGLE: | |
1108 | disable_multi = 1; | |
1109 | break; | |
1110 | case MMC_BLK_RETRY: | |
1111 | if (retry++ < 5) | |
a01f3ccf | 1112 | break; |
d78d4a8a | 1113 | case MMC_BLK_ABORT: |
4c2b8f26 | 1114 | goto cmd_abort; |
d78d4a8a PF |
1115 | case MMC_BLK_DATA_ERR: |
1116 | /* | |
1117 | * After an error, we redo I/O one sector at a | |
1118 | * time, so we only reach here after trying to | |
1119 | * read a single sector. | |
1120 | */ | |
1121 | spin_lock_irq(&md->lock); | |
1122 | ret = __blk_end_request(req, -EIO, | |
1123 | brq->data.blksz); | |
1124 | spin_unlock_irq(&md->lock); | |
ee8a43a5 PF |
1125 | if (!ret) |
1126 | goto start_new_req; | |
d78d4a8a | 1127 | break; |
4c2b8f26 RKAL |
1128 | } |
1129 | ||
ee8a43a5 PF |
1130 | if (ret) { |
1131 | /* | |
1132 | * In case of an incomplete request, | |
1133 | * prepare it again and resend. | |
1134 | */ | |
1135 | mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq); | |
1136 | mmc_start_req(card->host, &mq_rq->mmc_active, NULL); | |
1137 | } | |
1da177e4 LT |
1138 | } while (ret); |
1139 | ||
1da177e4 LT |
1140 | return 1; |
1141 | ||
1142 | cmd_err: | |
ec5a19dd PO |
1143 | /* |
1144 | * If this is an SD card and we're writing, we can first | |
1145 | * mark the known good sectors as ok. | |
1146 | * | |
1147 | * If the card is not SD, we can still ok written sectors | |
23af6039 PO |
1148 | * as reported by the controller (which might be less than |
1149 | * the real number of written sectors, but never more). | |
1da177e4 | 1150 | */ |
6a79e391 AH |
1151 | if (mmc_card_sd(card)) { |
1152 | u32 blocks; | |
23af6039 | 1153 | |
6a79e391 AH |
1154 | blocks = mmc_sd_num_wr_blocks(card); |
1155 | if (blocks != (u32)-1) { | |
ec5a19dd | 1156 | spin_lock_irq(&md->lock); |
6a79e391 | 1157 | ret = __blk_end_request(req, 0, blocks << 9); |
ec5a19dd PO |
1158 | spin_unlock_irq(&md->lock); |
1159 | } | |
6a79e391 AH |
1160 | } else { |
1161 | spin_lock_irq(&md->lock); | |
97868a2b | 1162 | ret = __blk_end_request(req, 0, brq->data.bytes_xfered); |
6a79e391 | 1163 | spin_unlock_irq(&md->lock); |
176f00ff PO |
1164 | } |
1165 | ||
a01f3ccf | 1166 | cmd_abort: |
1da177e4 | 1167 | spin_lock_irq(&md->lock); |
fd539832 KU |
1168 | while (ret) |
1169 | ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req)); | |
1da177e4 LT |
1170 | spin_unlock_irq(&md->lock); |
1171 | ||
ee8a43a5 PF |
1172 | start_new_req: |
1173 | if (rqc) { | |
1174 | mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); | |
1175 | mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL); | |
1176 | } | |
1177 | ||
1da177e4 LT |
1178 | return 0; |
1179 | } | |
1180 | ||
bd788c96 AH |
1181 | static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) |
1182 | { | |
1a258db6 AW |
1183 | int ret; |
1184 | struct mmc_blk_data *md = mq->data; | |
1185 | struct mmc_card *card = md->queue.card; | |
1186 | ||
ee8a43a5 PF |
1187 | if (req && !mq->mqrq_prev->req) |
1188 | /* claim host only for the first request */ | |
1189 | mmc_claim_host(card->host); | |
1190 | ||
371a689f AW |
1191 | ret = mmc_blk_part_switch(card, md); |
1192 | if (ret) { | |
1193 | ret = 0; | |
1194 | goto out; | |
1195 | } | |
1a258db6 | 1196 | |
ee8a43a5 PF |
1197 | if (req && req->cmd_flags & REQ_DISCARD) { |
1198 | /* complete ongoing async transfer before issuing discard */ | |
1199 | if (card->host->areq) | |
1200 | mmc_blk_issue_rw_rq(mq, NULL); | |
49804548 | 1201 | if (req->cmd_flags & REQ_SECURE) |
1a258db6 | 1202 | ret = mmc_blk_issue_secdiscard_rq(mq, req); |
49804548 | 1203 | else |
1a258db6 | 1204 | ret = mmc_blk_issue_discard_rq(mq, req); |
ee8a43a5 | 1205 | } else if (req && req->cmd_flags & REQ_FLUSH) { |
393f9a08 JC |
1206 | /* complete ongoing async transfer before issuing flush */ |
1207 | if (card->host->areq) | |
1208 | mmc_blk_issue_rw_rq(mq, NULL); | |
1a258db6 | 1209 | ret = mmc_blk_issue_flush(mq, req); |
49804548 | 1210 | } else { |
1a258db6 | 1211 | ret = mmc_blk_issue_rw_rq(mq, req); |
49804548 | 1212 | } |
1a258db6 | 1213 | |
371a689f | 1214 | out: |
ee8a43a5 PF |
1215 | if (!req) |
1216 | /* release host only when there are no more requests */ | |
1217 | mmc_release_host(card->host); | |
1a258db6 | 1218 | return ret; |
bd788c96 | 1219 | } |
1da177e4 | 1220 | |
a6f6c96b RK |
1221 | static inline int mmc_blk_readonly(struct mmc_card *card) |
1222 | { | |
1223 | return mmc_card_readonly(card) || | |
1224 | !(card->csd.cmdclass & CCC_BLOCK_WRITE); | |
1225 | } | |
1226 | ||
371a689f AW |
1227 | static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, |
1228 | struct device *parent, | |
1229 | sector_t size, | |
1230 | bool default_ro, | |
1231 | const char *subname) | |
1da177e4 LT |
1232 | { |
1233 | struct mmc_blk_data *md; | |
1234 | int devidx, ret; | |
1235 | ||
5e71b7a6 OJ |
1236 | devidx = find_first_zero_bit(dev_use, max_devices); |
1237 | if (devidx >= max_devices) | |
1da177e4 LT |
1238 | return ERR_PTR(-ENOSPC); |
1239 | __set_bit(devidx, dev_use); | |
1240 | ||
dd00cc48 | 1241 | md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL); |
a6f6c96b RK |
1242 | if (!md) { |
1243 | ret = -ENOMEM; | |
1244 | goto out; | |
1245 | } | |
1da177e4 | 1246 | |
f06c9153 AW |
1247 | /* |
1248 | * !subname implies we are creating main mmc_blk_data that will be | |
1249 | * associated with mmc_card with mmc_set_drvdata. Due to device | |
1250 | * partitions, devidx will not coincide with a per-physical card | |
1251 | * index anymore so we keep track of a name index. | |
1252 | */ | |
1253 | if (!subname) { | |
1254 | md->name_idx = find_first_zero_bit(name_use, max_devices); | |
1255 | __set_bit(md->name_idx, name_use); | |
1256 | } | |
1257 | else | |
1258 | md->name_idx = ((struct mmc_blk_data *) | |
1259 | dev_to_disk(parent)->private_data)->name_idx; | |
1260 | ||
a6f6c96b RK |
1261 | /* |
1262 | * Set the read-only status based on the supported commands | |
1263 | * and the write protect switch. | |
1264 | */ | |
1265 | md->read_only = mmc_blk_readonly(card); | |
1da177e4 | 1266 | |
5e71b7a6 | 1267 | md->disk = alloc_disk(perdev_minors); |
a6f6c96b RK |
1268 | if (md->disk == NULL) { |
1269 | ret = -ENOMEM; | |
1270 | goto err_kfree; | |
1271 | } | |
1da177e4 | 1272 | |
a6f6c96b | 1273 | spin_lock_init(&md->lock); |
371a689f | 1274 | INIT_LIST_HEAD(&md->part); |
a6f6c96b | 1275 | md->usage = 1; |
1da177e4 | 1276 | |
d09408ad | 1277 | ret = mmc_init_queue(&md->queue, card, &md->lock, subname); |
a6f6c96b RK |
1278 | if (ret) |
1279 | goto err_putdisk; | |
1da177e4 | 1280 | |
a6f6c96b RK |
1281 | md->queue.issue_fn = mmc_blk_issue_rq; |
1282 | md->queue.data = md; | |
d2b18394 | 1283 | |
fe6b4c88 | 1284 | md->disk->major = MMC_BLOCK_MAJOR; |
5e71b7a6 | 1285 | md->disk->first_minor = devidx * perdev_minors; |
a6f6c96b RK |
1286 | md->disk->fops = &mmc_bdops; |
1287 | md->disk->private_data = md; | |
1288 | md->disk->queue = md->queue.queue; | |
371a689f AW |
1289 | md->disk->driverfs_dev = parent; |
1290 | set_disk_ro(md->disk, md->read_only || default_ro); | |
a6f6c96b RK |
1291 | |
1292 | /* | |
1293 | * As discussed on lkml, GENHD_FL_REMOVABLE should: | |
1294 | * | |
1295 | * - be set for removable media with permanent block devices | |
1296 | * - be unset for removable block devices with permanent media | |
1297 | * | |
1298 | * Since MMC block devices clearly fall under the second | |
1299 | * case, we do not set GENHD_FL_REMOVABLE. Userspace | |
1300 | * should use the block device creation/destruction hotplug | |
1301 | * messages to tell when the card is present. | |
1302 | */ | |
1303 | ||
f06c9153 AW |
1304 | snprintf(md->disk->disk_name, sizeof(md->disk->disk_name), |
1305 | "mmcblk%d%s", md->name_idx, subname ? subname : ""); | |
a6f6c96b | 1306 | |
e1defc4f | 1307 | blk_queue_logical_block_size(md->queue.queue, 512); |
371a689f | 1308 | set_capacity(md->disk, size); |
d0c97cfb | 1309 | |
f0d89972 AW |
1310 | if (mmc_host_cmd23(card->host)) { |
1311 | if (mmc_card_mmc(card) || | |
1312 | (mmc_card_sd(card) && | |
1313 | card->scr.cmds & SD_SCR_CMD23_SUPPORT)) | |
1314 | md->flags |= MMC_BLK_CMD23; | |
1315 | } | |
d0c97cfb AW |
1316 | |
1317 | if (mmc_card_mmc(card) && | |
1318 | md->flags & MMC_BLK_CMD23 && | |
1319 | ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) || | |
1320 | card->ext_csd.rel_sectors)) { | |
1321 | md->flags |= MMC_BLK_REL_WR; | |
1322 | blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA); | |
1323 | } | |
1324 | ||
371a689f AW |
1325 | return md; |
1326 | ||
1327 | err_putdisk: | |
1328 | put_disk(md->disk); | |
1329 | err_kfree: | |
1330 | kfree(md); | |
1331 | out: | |
1332 | return ERR_PTR(ret); | |
1333 | } | |
1334 | ||
1335 | static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card) | |
1336 | { | |
1337 | sector_t size; | |
1338 | struct mmc_blk_data *md; | |
a6f6c96b | 1339 | |
85a18ad9 PO |
1340 | if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) { |
1341 | /* | |
1342 | * The EXT_CSD sector count is in number of 512-byte | |
1343 | * sectors. | |
1344 | */ | |
371a689f | 1345 | size = card->ext_csd.sectors; |
85a18ad9 PO |
1346 | } else { |
1347 | /* | |
1348 | * The CSD capacity field is in units of read_blkbits. | |
1349 | * set_capacity takes units of 512 bytes. | |
1350 | */ | |
371a689f | 1351 | size = card->csd.capacity << (card->csd.read_blkbits - 9); |
85a18ad9 | 1352 | } |
371a689f AW |
1353 | |
1354 | md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL); | |
1da177e4 | 1355 | return md; |
371a689f | 1356 | } |
a6f6c96b | 1357 | |
371a689f AW |
1358 | static int mmc_blk_alloc_part(struct mmc_card *card, |
1359 | struct mmc_blk_data *md, | |
1360 | unsigned int part_type, | |
1361 | sector_t size, | |
1362 | bool default_ro, | |
1363 | const char *subname) | |
1364 | { | |
1365 | char cap_str[10]; | |
1366 | struct mmc_blk_data *part_md; | |
1367 | ||
1368 | part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro, | |
1369 | subname); | |
1370 | if (IS_ERR(part_md)) | |
1371 | return PTR_ERR(part_md); | |
1372 | part_md->part_type = part_type; | |
1373 | list_add(&part_md->part, &md->part); | |
1374 | ||
1375 | string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2, | |
1376 | cap_str, sizeof(cap_str)); | |
1377 | printk(KERN_INFO "%s: %s %s partition %u %s\n", | |
1378 | part_md->disk->disk_name, mmc_card_id(card), | |
1379 | mmc_card_name(card), part_md->part_type, cap_str); | |
1380 | return 0; | |
1381 | } | |
1382 | ||
1383 | static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md) | |
1384 | { | |
1385 | int ret = 0; | |
1386 | ||
1387 | if (!mmc_card_mmc(card)) | |
1388 | return 0; | |
1389 | ||
1390 | if (card->ext_csd.boot_size) { | |
1391 | ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT0, | |
1392 | card->ext_csd.boot_size >> 9, | |
1393 | true, | |
1394 | "boot0"); | |
1395 | if (ret) | |
1396 | return ret; | |
1397 | ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT1, | |
1398 | card->ext_csd.boot_size >> 9, | |
1399 | true, | |
1400 | "boot1"); | |
1401 | if (ret) | |
1402 | return ret; | |
1403 | } | |
1404 | ||
1405 | return ret; | |
1da177e4 LT |
1406 | } |
1407 | ||
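/*
 * Editor's note: mmc_blk_alloc_req() builds the disk name as
 * "mmcblk%d%s" from the name index and the subname passed in, so the two
 * boot areas allocated above typically appear as /dev/mmcblk0boot0 and
 * /dev/mmcblk0boot1, created read-only (default_ro is true) alongside
 * the main /dev/mmcblk0 device.
 */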
1408 | static int | |
1409 | mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card) | |
1410 | { | |
1da177e4 LT |
1411 | int err; |
1412 | ||
b855885e | 1413 | mmc_claim_host(card->host); |
0f8d8ea6 | 1414 | err = mmc_set_blocklen(card, 512); |
b855885e | 1415 | mmc_release_host(card->host); |
1da177e4 LT |
1416 | |
1417 | if (err) { | |
0f8d8ea6 AH |
1418 | printk(KERN_ERR "%s: unable to set block size to 512: %d\n", |
1419 | md->disk->disk_name, err); | |
1da177e4 LT |
1420 | return -EINVAL; |
1421 | } | |
1422 | ||
1423 | return 0; | |
1424 | } | |
1425 | ||
371a689f AW |
1426 | static void mmc_blk_remove_req(struct mmc_blk_data *md) |
1427 | { | |
1428 | if (md) { | |
1429 | if (md->disk->flags & GENHD_FL_UP) { | |
1430 | device_remove_file(disk_to_dev(md->disk), &md->force_ro); | |
1431 | ||
1432 | /* Stop new requests from getting into the queue */ | |
1433 | del_gendisk(md->disk); | |
1434 | } | |
1435 | ||
1436 | /* Then flush out any already in there */ | |
1437 | mmc_cleanup_queue(&md->queue); | |
1438 | mmc_blk_put(md); | |
1439 | } | |
1440 | } | |
1441 | ||
1442 | static void mmc_blk_remove_parts(struct mmc_card *card, | |
1443 | struct mmc_blk_data *md) | |
1444 | { | |
1445 | struct list_head *pos, *q; | |
1446 | struct mmc_blk_data *part_md; | |
1447 | ||
f06c9153 | 1448 | __clear_bit(md->name_idx, name_use); |
371a689f AW |
1449 | list_for_each_safe(pos, q, &md->part) { |
1450 | part_md = list_entry(pos, struct mmc_blk_data, part); | |
1451 | list_del(pos); | |
1452 | mmc_blk_remove_req(part_md); | |
1453 | } | |
1454 | } | |
1455 | ||
1456 | static int mmc_add_disk(struct mmc_blk_data *md) | |
1457 | { | |
1458 | int ret; | |
1459 | ||
1460 | add_disk(md->disk); | |
1461 | md->force_ro.show = force_ro_show; | |
1462 | md->force_ro.store = force_ro_store; | |
641c3187 | 1463 | sysfs_attr_init(&md->force_ro.attr); |
371a689f AW |
1464 | md->force_ro.attr.name = "force_ro"; |
1465 | md->force_ro.attr.mode = S_IRUGO | S_IWUSR; | |
1466 | ret = device_create_file(disk_to_dev(md->disk), &md->force_ro); | |
1467 | if (ret) | |
1468 | del_gendisk(md->disk); | |
1469 | ||
1470 | return ret; | |
1471 | } | |
1472 | ||
6f60c222 AW |
1473 | static const struct mmc_fixup blk_fixups[] = |
1474 | { | |
6a7a6b45 AW |
1475 | MMC_FIXUP("SEM02G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38), |
1476 | MMC_FIXUP("SEM04G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38), | |
1477 | MMC_FIXUP("SEM08G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38), | |
1478 | MMC_FIXUP("SEM16G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38), | |
1479 | MMC_FIXUP("SEM32G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38), | |
d0c97cfb AW |
1480 | |
1481 | /* | |
1482 | * Some MMC cards experience performance degradation with CMD23 | |
1483 | * instead of CMD12-bounded multiblock transfers. For now we'll | |
1484 | * blacklist what's bad... | |
1485 | * - Certain Toshiba cards. | |
1486 | * | |
1487 | * N.B. This doesn't affect SD cards. | |
1488 | */ | |
1489 | MMC_FIXUP("MMC08G", 0x11, CID_OEMID_ANY, add_quirk_mmc, | |
1490 | MMC_QUIRK_BLK_NO_CMD23), | |
1491 | MMC_FIXUP("MMC16G", 0x11, CID_OEMID_ANY, add_quirk_mmc, | |
1492 | MMC_QUIRK_BLK_NO_CMD23), | |
1493 | MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc, | |
1494 | MMC_QUIRK_BLK_NO_CMD23), | |
6f60c222 AW |
1495 | END_FIXUP |
1496 | }; | |
1497 | ||
1da177e4 LT |
1498 | static int mmc_blk_probe(struct mmc_card *card) |
1499 | { | |
371a689f | 1500 | struct mmc_blk_data *md, *part_md; |
1da177e4 | 1501 | int err; |
a7bbb573 PO |
1502 | char cap_str[10]; |
1503 | ||
912490db PO |
1504 | /* |
1505 | * Check that the card supports the command class(es) we need. | |
1506 | */ | |
1507 | if (!(card->csd.cmdclass & CCC_BLOCK_READ)) | |
1da177e4 LT |
1508 | return -ENODEV; |
1509 | ||
1da177e4 LT |
1510 | md = mmc_blk_alloc(card); |
1511 | if (IS_ERR(md)) | |
1512 | return PTR_ERR(md); | |
1513 | ||
1514 | err = mmc_blk_set_blksize(md, card); | |
1515 | if (err) | |
1516 | goto out; | |
1517 | ||
444122fd | 1518 | string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2, |
a7bbb573 PO |
1519 | cap_str, sizeof(cap_str)); |
1520 | printk(KERN_INFO "%s: %s %s %s %s\n", | |
1da177e4 | 1521 | md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), |
a7bbb573 | 1522 | cap_str, md->read_only ? "(ro)" : ""); |
1da177e4 | 1523 | |
371a689f AW |
1524 | if (mmc_blk_alloc_parts(card, md)) |
1525 | goto out; | |
1526 | ||
1da177e4 | 1527 | mmc_set_drvdata(card, md); |
6f60c222 AW |
1528 | mmc_fixup_device(card, blk_fixups); |
1529 | ||
371a689f AW |
1530 | if (mmc_add_disk(md)) |
1531 | goto out; | |
1532 | ||
1533 | list_for_each_entry(part_md, &md->part, part) { | |
1534 | if (mmc_add_disk(part_md)) | |
1535 | goto out; | |
1536 | } | |
1da177e4 LT |
1537 | return 0; |
1538 | ||
1539 | out: | |
371a689f AW |
1540 | mmc_blk_remove_parts(card, md); |
1541 | mmc_blk_remove_req(md); | |
1da177e4 LT |
1542 | return err; |
1543 | } | |
1544 | ||
1545 | static void mmc_blk_remove(struct mmc_card *card) | |
1546 | { | |
1547 | struct mmc_blk_data *md = mmc_get_drvdata(card); | |
1548 | ||
371a689f | 1549 | mmc_blk_remove_parts(card, md); |
ddd6fa7e AH |
1550 | mmc_claim_host(card->host); |
1551 | mmc_blk_part_switch(card, md); | |
1552 | mmc_release_host(card->host); | |
371a689f | 1553 | mmc_blk_remove_req(md); |
1da177e4 LT |
1554 | mmc_set_drvdata(card, NULL); |
1555 | } | |
1556 | ||
1557 | #ifdef CONFIG_PM | |
1558 | static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state) | |
1559 | { | |
371a689f | 1560 | struct mmc_blk_data *part_md; |
1da177e4 LT |
1561 | struct mmc_blk_data *md = mmc_get_drvdata(card); |
1562 | ||
1563 | if (md) { | |
1564 | mmc_queue_suspend(&md->queue); | |
371a689f AW |
1565 | list_for_each_entry(part_md, &md->part, part) { |
1566 | mmc_queue_suspend(&part_md->queue); | |
1567 | } | |
1da177e4 LT |
1568 | } |
1569 | return 0; | |
1570 | } | |
1571 | ||
1572 | static int mmc_blk_resume(struct mmc_card *card) | |
1573 | { | |
371a689f | 1574 | struct mmc_blk_data *part_md; |
1da177e4 LT |
1575 | struct mmc_blk_data *md = mmc_get_drvdata(card); |
1576 | ||
1577 | if (md) { | |
1578 | mmc_blk_set_blksize(md, card); | |
371a689f AW |
1579 | |
1580 | /* | |
1581 | * Resume involves the card going into idle state, | |
1582 | * so current partition is always the main one. | |
1583 | */ | |
1584 | md->part_curr = md->part_type; | |
1da177e4 | 1585 | mmc_queue_resume(&md->queue); |
371a689f AW |
1586 | list_for_each_entry(part_md, &md->part, part) { |
1587 | mmc_queue_resume(&part_md->queue); | |
1588 | } | |
1da177e4 LT |
1589 | } |
1590 | return 0; | |
1591 | } | |
1592 | #else | |
1593 | #define mmc_blk_suspend NULL | |
1594 | #define mmc_blk_resume NULL | |
1595 | #endif | |
1596 | ||
1597 | static struct mmc_driver mmc_driver = { | |
1598 | .drv = { | |
1599 | .name = "mmcblk", | |
1600 | }, | |
1601 | .probe = mmc_blk_probe, | |
1602 | .remove = mmc_blk_remove, | |
1603 | .suspend = mmc_blk_suspend, | |
1604 | .resume = mmc_blk_resume, | |
1605 | }; | |
1606 | ||
1607 | static int __init mmc_blk_init(void) | |
1608 | { | |
9d4e98e9 | 1609 | int res; |
1da177e4 | 1610 | |
5e71b7a6 OJ |
1611 | if (perdev_minors != CONFIG_MMC_BLOCK_MINORS) |
1612 | pr_info("mmcblk: using %d minors per device\n", perdev_minors); | |
1613 | ||
1614 | max_devices = 256 / perdev_minors; | |
1615 | ||
fe6b4c88 PO |
1616 | res = register_blkdev(MMC_BLOCK_MAJOR, "mmc"); |
1617 | if (res) | |
1da177e4 | 1618 | goto out; |
1da177e4 | 1619 | |
9d4e98e9 AM |
1620 | res = mmc_register_driver(&mmc_driver); |
1621 | if (res) | |
1622 | goto out2; | |
1da177e4 | 1623 | |
9d4e98e9 AM |
1624 | return 0; |
1625 | out2: | |
1626 | unregister_blkdev(MMC_BLOCK_MAJOR, "mmc"); | |
1da177e4 LT |
1627 | out: |
1628 | return res; | |
1629 | } | |
1630 | ||
1631 | static void __exit mmc_blk_exit(void) | |
1632 | { | |
1633 | mmc_unregister_driver(&mmc_driver); | |
fe6b4c88 | 1634 | unregister_blkdev(MMC_BLOCK_MAJOR, "mmc"); |
1da177e4 LT |
1635 | } |
1636 | ||
1637 | module_init(mmc_blk_init); | |
1638 | module_exit(mmc_blk_exit); | |
1639 | ||
1640 | MODULE_LICENSE("GPL"); | |
1641 | MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver"); | |
1642 |