/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/system.h>
#include <asm/uaccess.h>

#include "queue.h"
/*
 * max 8 partitions per card
 */
#define MMC_SHIFT	3
#define MMC_NUM_MINORS	(256 >> MMC_SHIFT)

static DECLARE_BITMAP(dev_use, MMC_NUM_MINORS);
/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	spinlock_t	lock;
	struct gendisk	*disk;
	struct mmc_queue queue;

	unsigned int	usage;
	unsigned int	read_only;
};

static DEFINE_MUTEX(open_lock);
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
	struct mmc_blk_data *md;

	mutex_lock(&open_lock);
	md = disk->private_data;
	if (md && md->usage == 0)
		md = NULL;
	if (md)
		md->usage++;
	mutex_unlock(&open_lock);

	return md;
}
static void mmc_blk_put(struct mmc_blk_data *md)
{
	mutex_lock(&open_lock);
	md->usage--;
	if (md->usage == 0) {
		int devidx = MINOR(disk_devt(md->disk)) >> MMC_SHIFT;
		__clear_bit(devidx, dev_use);

		put_disk(md->disk);
		kfree(md);
	}
	mutex_unlock(&open_lock);
}
static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
	int ret = -ENXIO;

	if (md) {
		if (md->usage == 2)
			check_disk_change(bdev);
		ret = 0;

		if ((mode & FMODE_WRITE) && md->read_only) {
			mmc_blk_put(md);
			ret = -EROFS;
		}
	}

	return ret;
}
static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
	struct mmc_blk_data *md = disk->private_data;

	mmc_blk_put(md);
	return 0;
}
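/*
 * Synthesize a CHS geometry for HDIO_GETGEO: flash cards have no real
 * geometry, so report 4 heads and 16 sectors per track and derive the
 * cylinder count from the capacity.
 */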
static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
}
static struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
};
struct mmc_blk_request {
	struct mmc_request	mrq;
	struct mmc_command	cmd;
	struct mmc_command	stop;
	struct mmc_data		data;
};
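/*
 * Ask an SD card how many blocks it wrote successfully in the last
 * write transfer (APP_CMD followed by SEND_NUM_WR_BLOCKS), so a
 * partially failed write can still be partially completed.  Returns
 * (u32)-1 on failure.
 */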
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
	int err;
	u32 blocks;

	struct mmc_request mrq;
	struct mmc_command cmd;
	struct mmc_data data;
	unsigned int timeout_us;

	struct scatterlist sg;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		return (u32)-1;
	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
		return (u32)-1;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
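	/*
	 * Use the CSD access time, scaled up 100x for safety, as the data
	 * timeout, but never wait longer than 100 ms in total.
	 */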
	memset(&data, 0, sizeof(struct mmc_data));

	data.timeout_ns = card->csd.tacc_ns * 100;
	data.timeout_clks = card->csd.tacc_clks * 100;

	timeout_us = data.timeout_ns / 1000;
	timeout_us += data.timeout_clks * 1000 /
		(card->host->ios.clock / 1000);

	if (timeout_us > 100000) {
		data.timeout_ns = 100000000;
		data.timeout_clks = 0;
	}

	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	memset(&mrq, 0, sizeof(struct mmc_request));

	mrq.cmd = &cmd;
	mrq.data = &data;

	sg_init_one(&sg, &blocks, 4);

	mmc_wait_for_req(card->host, &mrq);

	if (cmd.error || data.error)
		return (u32)-1;

	return ntohl(blocks);
}
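/*
 * Read the card's status register (SEND_STATUS) so errors can be
 * reported together with the current card status bits.
 */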
static u32 get_card_status(struct mmc_card *card, struct request *req)
{
	struct mmc_command cmd;
	int err;

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		printk(KERN_ERR "%s: error %d sending status command\n",
		       req->rq_disk->disk_name, err);
	return cmd.resp[0];
}
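/*
 * Handle one block layer request: build the MMC command/data descriptors,
 * run them against the card and complete the request, retrying reads one
 * sector at a time after an error.
 */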
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request brq;
	int ret = 1, disable_multi = 0;

	mmc_claim_host(card->host);

	do {
		struct mmc_command cmd;
		u32 readcmd, writecmd, status = 0;

		memset(&brq, 0, sizeof(struct mmc_blk_request));
		brq.mrq.cmd = &brq.cmd;
		brq.mrq.data = &brq.data;
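		/*
		 * Standard-capacity cards are byte addressed, so the sector
		 * number must be converted to a byte offset; high-capacity
		 * (block-addressed) cards take the sector number directly.
		 */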
		brq.cmd.arg = req->sector;
		if (!mmc_card_blockaddr(card))
			brq.cmd.arg <<= 9;
		brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
		brq.data.blksz = 512;
		brq.stop.opcode = MMC_STOP_TRANSMISSION;
		brq.stop.arg = 0;
		brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
		brq.data.blocks = req->nr_sectors;
		/*
		 * After a read error, we redo the request one sector at a time
		 * in order to accurately determine which sectors can be read
		 * successfully.
		 */
		if (disable_multi && brq.data.blocks > 1)
			brq.data.blocks = 1;

		if (brq.data.blocks > 1) {
			/* SPI multiblock writes terminate using a special
			 * token, not a STOP_TRANSMISSION request.
			 */
			if (!mmc_host_is_spi(card->host)
					|| rq_data_dir(req) == READ)
				brq.mrq.stop = &brq.stop;
			readcmd = MMC_READ_MULTIPLE_BLOCK;
			writecmd = MMC_WRITE_MULTIPLE_BLOCK;
		} else {
			brq.mrq.stop = NULL;
			readcmd = MMC_READ_SINGLE_BLOCK;
			writecmd = MMC_WRITE_BLOCK;
		}
		if (rq_data_dir(req) == READ) {
			brq.cmd.opcode = readcmd;
			brq.data.flags |= MMC_DATA_READ;
		} else {
			brq.cmd.opcode = writecmd;
			brq.data.flags |= MMC_DATA_WRITE;
		}

		mmc_set_data_timeout(&brq.data, card);

		brq.data.sg = mq->sg;
		brq.data.sg_len = mmc_queue_map_sg(mq);
		/*
		 * Adjust the sg list so it is the same size as the
		 * request.
		 */
		if (brq.data.blocks != req->nr_sectors) {
			int i, data_size = brq.data.blocks << 9;
			struct scatterlist *sg;

			for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
				data_size -= sg->length;
				if (data_size <= 0) {
					sg->length += data_size;
					i++;
					break;
				}
			}
			brq.data.sg_len = i;
		}

		mmc_queue_bounce_pre(mq);

		mmc_wait_for_req(card->host, &brq.mrq);

		mmc_queue_bounce_post(mq);
		/*
		 * Check for errors here, but don't jump to cmd_err
		 * until later as we need to wait for the card to leave
		 * programming mode even when things go wrong.
		 */
		if (brq.cmd.error || brq.data.error || brq.stop.error) {
			if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
				/* Redo read one sector at a time */
				printk(KERN_WARNING "%s: retrying using single "
				       "block read\n", req->rq_disk->disk_name);
				disable_multi = 1;
				continue;
			}
			status = get_card_status(card, req);
		}

		if (brq.cmd.error) {
			printk(KERN_ERR "%s: error %d sending read/write "
			       "command, response %#x, card status %#x\n",
			       req->rq_disk->disk_name, brq.cmd.error,
			       brq.cmd.resp[0], status);
		}
		if (brq.data.error) {
			if (brq.data.error == -ETIMEDOUT && brq.mrq.stop)
				/* 'Stop' response contains card status */
				status = brq.mrq.stop->resp[0];
			printk(KERN_ERR "%s: error %d transferring data,"
			       " sector %u, nr %u, card status %#x\n",
			       req->rq_disk->disk_name, brq.data.error,
			       (unsigned)req->sector,
			       (unsigned)req->nr_sectors, status);
		}
		if (brq.stop.error) {
			printk(KERN_ERR "%s: error %d sending stop command, "
			       "response %#x, card status %#x\n",
			       req->rq_disk->disk_name, brq.stop.error,
			       brq.stop.resp[0], status);
		}
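		/*
		 * For writes on a native MMC/SD bus, poll SEND_STATUS until
		 * the card reports READY_FOR_DATA and has left the
		 * programming state, so the request is not completed while
		 * the card is still busy committing the data.
		 */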
		if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
			do {
				int err;

				cmd.opcode = MMC_SEND_STATUS;
				cmd.arg = card->rca << 16;
				cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
				err = mmc_wait_for_cmd(card->host, &cmd, 5);
				if (err) {
					printk(KERN_ERR "%s: error %d requesting status\n",
					       req->rq_disk->disk_name, err);
					goto cmd_err;
				}
				/*
				 * Some cards mishandle the status bits,
				 * so make sure to check both the busy
				 * indication and the card state.
				 */
			} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
				(R1_CURRENT_STATE(cmd.resp[0]) == 7));

#if 0
			if (cmd.resp[0] & ~0x00000900)
				printk(KERN_ERR "%s: status = %08x\n",
				       req->rq_disk->disk_name, cmd.resp[0]);
			if (mmc_decode_status(cmd.resp))
				goto cmd_err;
#endif
		}
		if (brq.cmd.error || brq.stop.error || brq.data.error) {
			if (rq_data_dir(req) == READ) {
				/*
				 * After an error, we redo I/O one sector at a
				 * time, so we only reach here after trying to
				 * read a single sector.
				 */
				spin_lock_irq(&md->lock);
				ret = __blk_end_request(req, -EIO, brq.data.blksz);
				spin_unlock_irq(&md->lock);
				continue;
			}
			goto cmd_err;
		}

		/*
		 * A block was successfully transferred.
		 */
		spin_lock_irq(&md->lock);
		ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
		spin_unlock_irq(&md->lock);
	} while (ret);

	mmc_release_host(card->host);

	return 1;

 cmd_err:
	/*
	 * If this is an SD card and we're writing, we can first
	 * mark the known good sectors as ok.
	 *
	 * If the card is not SD, we can still ok written sectors
	 * as reported by the controller (which might be less than
	 * the real number of written sectors, but never more).
	 */
	if (mmc_card_sd(card)) {
		u32 blocks;

		blocks = mmc_sd_num_wr_blocks(card);
		if (blocks != (u32)-1) {
			spin_lock_irq(&md->lock);
			ret = __blk_end_request(req, 0, blocks << 9);
			spin_unlock_irq(&md->lock);
		}
	} else {
		spin_lock_irq(&md->lock);
		ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
		spin_unlock_irq(&md->lock);
	}

	mmc_release_host(card->host);

	spin_lock_irq(&md->lock);
	while (ret)
		ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
	spin_unlock_irq(&md->lock);

	return 0;
}
static inline int mmc_blk_readonly(struct mmc_card *card)
{
	return mmc_card_readonly(card) ||
	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
}
static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
	struct mmc_blk_data *md;
	int devidx, ret;
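	/*
	 * Claim the next free device index; each card gets a range of
	 * 1 << MMC_SHIFT minor numbers for its partitions.
	 */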
	devidx = find_first_zero_bit(dev_use, MMC_NUM_MINORS);
	if (devidx >= MMC_NUM_MINORS)
		return ERR_PTR(-ENOSPC);
	__set_bit(devidx, dev_use);

	md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
	if (!md) {
		ret = -ENOMEM;
		goto out;
	}
	/*
	 * Set the read-only status based on the supported commands
	 * and the write protect switch.
	 */
	md->read_only = mmc_blk_readonly(card);

	md->disk = alloc_disk(1 << MMC_SHIFT);
	if (md->disk == NULL) {
		ret = -ENOMEM;
		goto err_kfree;
	}

	spin_lock_init(&md->lock);
	md->usage = 1;

	ret = mmc_init_queue(&md->queue, card, &md->lock);
	if (ret)
		goto err_putdisk;

	md->queue.issue_fn = mmc_blk_issue_rq;
	md->queue.data = md;

	md->disk->major	= MMC_BLOCK_MAJOR;
	md->disk->first_minor = devidx << MMC_SHIFT;
	md->disk->fops = &mmc_bdops;
	md->disk->private_data = md;
	md->disk->queue = md->queue.queue;
	md->disk->driverfs_dev = &card->dev;
	/*
	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
	 *
	 * - be set for removable media with permanent block devices
	 * - be unset for removable block devices with permanent media
	 *
	 * Since MMC block devices clearly fall under the second
	 * case, we do not set GENHD_FL_REMOVABLE.  Userspace
	 * should use the block device creation/destruction hotplug
	 * messages to tell when the card is present.
	 */

	sprintf(md->disk->disk_name, "mmcblk%d", devidx);

	blk_queue_hardsect_size(md->queue.queue, 512);
	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
		/*
		 * The EXT_CSD sector count is in number of 512 byte
		 * sectors.
		 */
		set_capacity(md->disk, card->ext_csd.sectors);
	} else {
		/*
		 * The CSD capacity field is in units of read_blkbits.
		 * set_capacity takes units of 512 bytes.
		 */
		set_capacity(md->disk,
			card->csd.capacity << (card->csd.read_blkbits - 9));
	}
	return md;

 err_putdisk:
	put_disk(md->disk);
 err_kfree:
	kfree(md);
 out:
	return ERR_PTR(ret);
}
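/*
 * Program byte-addressed cards to use a 512 byte block length so the
 * card's block size matches the 512 byte sectors used by the queue.
 */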
static int
mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
{
	struct mmc_command cmd;
	int err;

	/* Block-addressed cards ignore MMC_SET_BLOCKLEN. */
	if (mmc_card_blockaddr(card))
		return 0;

	mmc_claim_host(card->host);
	cmd.opcode = MMC_SET_BLOCKLEN;
	cmd.arg = 512;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 5);
	mmc_release_host(card->host);

	if (err) {
		printk(KERN_ERR "%s: unable to set block size to %d: %d\n",
			md->disk->disk_name, cmd.arg, err);
		return -EINVAL;
	}

	return 0;
}
static int mmc_blk_probe(struct mmc_card *card)
{
	struct mmc_blk_data *md;
	int err;

	char cap_str[10];

	/*
	 * Check that the card supports the command class(es) we need.
	 */
	if (!(card->csd.cmdclass & CCC_BLOCK_READ))
		return -ENODEV;

	md = mmc_blk_alloc(card);
	if (IS_ERR(md))
		return PTR_ERR(md);

	err = mmc_blk_set_blksize(md, card);
	if (err)
		goto out;

	string_get_size(get_capacity(md->disk) << 9, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	printk(KERN_INFO "%s: %s %s %s %s\n",
		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
		cap_str, md->read_only ? "(ro)" : "");

	mmc_set_drvdata(card, md);
	add_disk(md->disk);
	return 0;

 out:
	mmc_cleanup_queue(&md->queue);
	mmc_blk_put(md);

	return err;
}
static void mmc_blk_remove(struct mmc_card *card)
{
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
		/* Stop new requests from getting into the queue */
		del_gendisk(md->disk);

		/* Then flush out any already in there */
		mmc_cleanup_queue(&md->queue);

		mmc_blk_put(md);
	}
	mmc_set_drvdata(card, NULL);
}
#ifdef CONFIG_PM
static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
{
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
		mmc_queue_suspend(&md->queue);
	}
	return 0;
}

static int mmc_blk_resume(struct mmc_card *card)
{
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
		mmc_blk_set_blksize(md, card);
		mmc_queue_resume(&md->queue);
	}
	return 0;
}
#else
#define	mmc_blk_suspend	NULL
#define mmc_blk_resume	NULL
#endif
static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmcblk",
	},
	.probe		= mmc_blk_probe,
	.remove		= mmc_blk_remove,
	.suspend	= mmc_blk_suspend,
	.resume		= mmc_blk_resume,
};
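/*
 * Register the block device major first, then the driver on the mmc
 * bus; if the bus registration fails the major is released again.
 */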
static int __init mmc_blk_init(void)
{
	int res;

	res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
	if (res)
		goto out;

	res = mmc_register_driver(&mmc_driver);
	if (res)
		goto out2;

	return 0;
 out2:
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
 out:
	return res;
}
static void __exit mmc_blk_exit(void)
{
	mmc_unregister_driver(&mmc_driver);
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}
module_init(mmc_blk_init);
module_exit(mmc_blk_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");