/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2007 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 *          28 May 2002
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/system.h>
#include <asm/uaccess.h>

#include "queue.h"

/*
 * max 8 partitions per card
 */
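/*
 * (Three minor-number bits per card gives 1 << 3 = 8 minors each, so a
 * 256-minor block major supports 256 >> MMC_SHIFT = 32 cards; see
 * MMC_NUM_MINORS below.)
 */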
#define MMC_SHIFT	3

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	spinlock_t	lock;
	struct gendisk	*disk;
	struct mmc_queue queue;

	unsigned int	usage;
	unsigned int	block_bits;
	unsigned int	read_only;
};

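/*
 * open_lock serializes md->usage so that concurrent opens, releases
 * and card removal agree on when the last reference drops and the
 * mmc_blk_data can be freed.
 */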
static DEFINE_MUTEX(open_lock);

static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
	struct mmc_blk_data *md;

	mutex_lock(&open_lock);
	md = disk->private_data;
	if (md && md->usage == 0)
		md = NULL;
	if (md)
		md->usage++;
	mutex_unlock(&open_lock);

	return md;
}

static void mmc_blk_put(struct mmc_blk_data *md)
{
	mutex_lock(&open_lock);
	md->usage--;
	if (md->usage == 0) {
		put_disk(md->disk);
		kfree(md);
	}
	mutex_unlock(&open_lock);
}

static int mmc_blk_open(struct inode *inode, struct file *filp)
{
	struct mmc_blk_data *md;
	int ret = -ENXIO;

	md = mmc_blk_get(inode->i_bdev->bd_disk);
	if (md) {
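		/*
		 * usage == 2 here means this is the first open since the
		 * disk was created (mmc_blk_alloc holds one reference), so
		 * have the block layer re-validate the media.
		 */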
		if (md->usage == 2)
			check_disk_change(inode->i_bdev);
		ret = 0;

		if ((filp->f_mode & FMODE_WRITE) && md->read_only)
			ret = -EROFS;
	}

	return ret;
}

static int mmc_blk_release(struct inode *inode, struct file *filp)
{
	struct mmc_blk_data *md = inode->i_bdev->bd_disk->private_data;

	mmc_blk_put(md);
	return 0;
}

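/*
 * MMC/SD media have no physical CHS geometry, but some partitioning
 * tools expect one, so fabricate 4 heads x 16 sectors and derive the
 * cylinder count from the capacity.
 */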
static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
}

static struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
};

struct mmc_blk_request {
	struct mmc_request	mrq;
	struct mmc_command	cmd;
	struct mmc_command	stop;
	struct mmc_data		data;
};

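/*
 * Ask an SD card how many blocks it wrote successfully by sending
 * ACMD22 (SEND_NUM_WR_BLKS).  Used on the write error path so the
 * known-good part of a failed request can still be completed.  The
 * 32-bit count comes back big-endian, hence the ntohl().  Returns
 * (u32)-1 on failure.
 */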
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
	int err;
	u32 blocks;

	struct mmc_request mrq;
	struct mmc_command cmd;
	struct mmc_data data;
	unsigned int timeout_us;

	struct scatterlist sg;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		return (u32)-1;
	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
		return (u32)-1;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	memset(&data, 0, sizeof(struct mmc_data));

	data.timeout_ns = card->csd.tacc_ns * 100;
	data.timeout_clks = card->csd.tacc_clks * 100;

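	/*
	 * Convert the CSD-derived timeout (100x the typical access time)
	 * to microseconds: the clock-count part becomes
	 * clks * 1000 / (clock in kHz).  Clamp the total at 100 ms.
	 */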
	timeout_us = data.timeout_ns / 1000;
	timeout_us += data.timeout_clks * 1000 /
		(card->host->ios.clock / 1000);

	if (timeout_us > 100000) {
		data.timeout_ns = 100000000;
		data.timeout_clks = 0;
	}

	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	memset(&mrq, 0, sizeof(struct mmc_request));

	mrq.cmd = &cmd;
	mrq.data = &data;

	sg_init_one(&sg, &blocks, 4);

	mmc_wait_for_req(card->host, &mrq);

	if (cmd.error || data.error)
		return (u32)-1;

	blocks = ntohl(blocks);

	return blocks;
}

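/*
 * Issue one block-layer request to the card.  Each pass through the
 * loop transfers as many sectors as the host allows in a single MMC
 * data command; end_that_request_chunk() then retires the completed
 * portion and the loop repeats until the whole request is done.
 */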
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request brq;
	int ret = 1, sg_pos, data_size;

	mmc_claim_host(card->host);

	do {
		struct mmc_command cmd;
		u32 readcmd, writecmd;

		memset(&brq, 0, sizeof(struct mmc_blk_request));
		brq.mrq.cmd = &brq.cmd;
		brq.mrq.data = &brq.data;

		brq.cmd.arg = req->sector;
		if (!mmc_card_blockaddr(card))
			brq.cmd.arg <<= 9;
		brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
		brq.data.blksz = 1 << md->block_bits;
		brq.stop.opcode = MMC_STOP_TRANSMISSION;
		brq.stop.arg = 0;
		brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
		brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
		if (brq.data.blocks > card->host->max_blk_count)
			brq.data.blocks = card->host->max_blk_count;

		/*
		 * If the host doesn't support multiple block writes, force
		 * block writes to single block. SD cards are excepted from
		 * this rule as they support querying the number of
		 * successfully written sectors.
		 */
		if (rq_data_dir(req) != READ &&
		    !(card->host->caps & MMC_CAP_MULTIWRITE) &&
		    !mmc_card_sd(card))
			brq.data.blocks = 1;

		if (brq.data.blocks > 1) {
			/* SPI multiblock writes terminate using a special
			 * token, not a STOP_TRANSMISSION request.
			 */
			if (!mmc_host_is_spi(card->host)
					|| rq_data_dir(req) == READ)
				brq.mrq.stop = &brq.stop;
			readcmd = MMC_READ_MULTIPLE_BLOCK;
			writecmd = MMC_WRITE_MULTIPLE_BLOCK;
		} else {
			brq.mrq.stop = NULL;
			readcmd = MMC_READ_SINGLE_BLOCK;
			writecmd = MMC_WRITE_BLOCK;
		}

		if (rq_data_dir(req) == READ) {
			brq.cmd.opcode = readcmd;
			brq.data.flags |= MMC_DATA_READ;
		} else {
			brq.cmd.opcode = writecmd;
			brq.data.flags |= MMC_DATA_WRITE;
		}

		mmc_set_data_timeout(&brq.data, card);

		brq.data.sg = mq->sg;
		brq.data.sg_len = mmc_queue_map_sg(mq);

		mmc_queue_bounce_pre(mq);

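		/*
		 * If the transfer was truncated to the host's
		 * max_blk_count, shrink the scatterlist so that it covers
		 * exactly the bytes being sent this pass.
		 */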
		if (brq.data.blocks !=
		    (req->nr_sectors >> (md->block_bits - 9))) {
			data_size = brq.data.blocks * brq.data.blksz;
			for (sg_pos = 0; sg_pos < brq.data.sg_len; sg_pos++) {
				data_size -= mq->sg[sg_pos].length;
				if (data_size <= 0) {
					mq->sg[sg_pos].length += data_size;
					sg_pos++;
					break;
				}
			}
			brq.data.sg_len = sg_pos;
		}

		mmc_wait_for_req(card->host, &brq.mrq);

		mmc_queue_bounce_post(mq);

		if (brq.cmd.error) {
			printk(KERN_ERR "%s: error %d sending read/write command\n",
			       req->rq_disk->disk_name, brq.cmd.error);
			goto cmd_err;
		}

		if (brq.data.error) {
			printk(KERN_ERR "%s: error %d transferring data\n",
			       req->rq_disk->disk_name, brq.data.error);
			goto cmd_err;
		}

		if (brq.stop.error) {
			printk(KERN_ERR "%s: error %d sending stop command\n",
			       req->rq_disk->disk_name, brq.stop.error);
			goto cmd_err;
		}

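		/*
		 * After a write the card may still be programming the
		 * flash, so poll SEND_STATUS until it reports
		 * READY_FOR_DATA.  SPI hosts signal busy at the protocol
		 * level instead, so the poll is skipped there.
		 */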
		if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
			do {
				int err;

				cmd.opcode = MMC_SEND_STATUS;
				cmd.arg = card->rca << 16;
				cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
				err = mmc_wait_for_cmd(card->host, &cmd, 5);
				if (err) {
					printk(KERN_ERR "%s: error %d requesting status\n",
					       req->rq_disk->disk_name, err);
					goto cmd_err;
				}
			} while (!(cmd.resp[0] & R1_READY_FOR_DATA));

#if 0
			if (cmd.resp[0] & ~0x00000900)
				printk(KERN_ERR "%s: status = %08x\n",
				       req->rq_disk->disk_name, cmd.resp[0]);
			if (mmc_decode_status(cmd.resp))
				goto cmd_err;
#endif
		}

		/*
		 * A block was successfully transferred.
		 */
		spin_lock_irq(&md->lock);
		ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
		if (!ret) {
			/*
			 * The whole request completed successfully.
			 */
			add_disk_randomness(req->rq_disk);
			blkdev_dequeue_request(req);
			end_that_request_last(req, 1);
		}
		spin_unlock_irq(&md->lock);
	} while (ret);

	mmc_release_host(card->host);

	return 1;

 cmd_err:
	/*
	 * If this is an SD card and we're writing, we can first
	 * mark the known good sectors as ok.
	 *
	 * If the card is not SD, we can still ok written sectors
	 * if the controller can do proper error reporting.
	 *
	 * For reads we just fail the entire chunk as that should
	 * be safe in all cases.
	 */
	if (rq_data_dir(req) != READ && mmc_card_sd(card)) {
		u32 blocks;
		unsigned int bytes;

		blocks = mmc_sd_num_wr_blocks(card);
		if (blocks != (u32)-1) {
			if (card->csd.write_partial)
				bytes = blocks << md->block_bits;
			else
				bytes = blocks << 9;
			spin_lock_irq(&md->lock);
			ret = end_that_request_chunk(req, 1, bytes);
			spin_unlock_irq(&md->lock);
		}
	} else if (rq_data_dir(req) != READ &&
		   (card->host->caps & MMC_CAP_MULTIWRITE)) {
		spin_lock_irq(&md->lock);
		ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
		spin_unlock_irq(&md->lock);
	}

	mmc_release_host(card->host);

	spin_lock_irq(&md->lock);
	while (ret) {
		ret = end_that_request_chunk(req, 0,
				req->current_nr_sectors << 9);
	}

	add_disk_randomness(req->rq_disk);
	blkdev_dequeue_request(req);
	end_that_request_last(req, 0);
	spin_unlock_irq(&md->lock);

	return 0;
}

#define MMC_NUM_MINORS	(256 >> MMC_SHIFT)

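/*
 * One bit per possible card: bit n set means the minor range starting
 * at n << MMC_SHIFT is in use.  DECLARE_BITMAP() rounds the bit count
 * up to whole unsigned longs, which a plain division would truncate
 * to zero on 64-bit.
 */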
static DECLARE_BITMAP(dev_use, MMC_NUM_MINORS);

static inline int mmc_blk_readonly(struct mmc_card *card)
{
	return mmc_card_readonly(card) ||
	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
}

static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
	struct mmc_blk_data *md;
	int devidx, ret;

	devidx = find_first_zero_bit(dev_use, MMC_NUM_MINORS);
	if (devidx >= MMC_NUM_MINORS)
		return ERR_PTR(-ENOSPC);
	__set_bit(devidx, dev_use);

	md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
	if (!md) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Set the read-only status based on the supported commands
	 * and the write protect switch.
	 */
	md->read_only = mmc_blk_readonly(card);

	/*
	 * Both SD and MMC specifications state (although a bit
	 * unclearly in the MMC case) that a block size of 512
	 * bytes must always be supported by the card.
	 */
	md->block_bits = 9;

	md->disk = alloc_disk(1 << MMC_SHIFT);
	if (md->disk == NULL) {
		ret = -ENOMEM;
		goto err_kfree;
	}

	spin_lock_init(&md->lock);
	md->usage = 1;

	ret = mmc_init_queue(&md->queue, card, &md->lock);
	if (ret)
		goto err_putdisk;

	md->queue.issue_fn = mmc_blk_issue_rq;
	md->queue.data = md;

	md->disk->major = MMC_BLOCK_MAJOR;
	md->disk->first_minor = devidx << MMC_SHIFT;
	md->disk->fops = &mmc_bdops;
	md->disk->private_data = md;
	md->disk->queue = md->queue.queue;
	md->disk->driverfs_dev = &card->dev;

	/*
	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
	 *
	 * - be set for removable media with permanent block devices
	 * - be unset for removable block devices with permanent media
	 *
	 * Since MMC block devices clearly fall under the second
	 * case, we do not set GENHD_FL_REMOVABLE.  Userspace
	 * should use the block device creation/destruction hotplug
	 * messages to tell when the card is present.
	 */

	sprintf(md->disk->disk_name, "mmcblk%d", devidx);

	blk_queue_hardsect_size(md->queue.queue, 1 << md->block_bits);

	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
		/*
		 * The EXT_CSD sector count is in number of 512-byte
		 * sectors.
		 */
		set_capacity(md->disk, card->ext_csd.sectors);
	} else {
		/*
		 * The CSD capacity field is in units of read_blkbits.
		 * set_capacity takes units of 512 bytes.
		 */
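		/*
		 * For example (hypothetical values): capacity == 4096
		 * with read_blkbits == 10 gives 4096 << (10 - 9) = 8192
		 * sectors, i.e. 4 MiB.
		 */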
		set_capacity(md->disk,
			card->csd.capacity << (card->csd.read_blkbits - 9));
	}
	return md;

 err_putdisk:
	put_disk(md->disk);
 err_kfree:
	kfree(md);
 out:
	return ERR_PTR(ret);
}

static int
mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
{
	struct mmc_command cmd;
	int err;

	/* Block-addressed cards ignore MMC_SET_BLOCKLEN. */
	if (mmc_card_blockaddr(card))
		return 0;

	mmc_claim_host(card->host);
	cmd.opcode = MMC_SET_BLOCKLEN;
	cmd.arg = 1 << md->block_bits;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 5);
	mmc_release_host(card->host);

	if (err) {
		printk(KERN_ERR "%s: unable to set block size to %d: %d\n",
			md->disk->disk_name, cmd.arg, err);
		return -EINVAL;
	}

	return 0;
}

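/*
 * Bind the block driver to a newly inserted card.  Cards that cannot
 * do block reads are rejected; everything else gets a gendisk, a
 * 512-byte block length, and is published with add_disk().
 */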
static int mmc_blk_probe(struct mmc_card *card)
{
	struct mmc_blk_data *md;
	int err;

	/*
	 * Check that the card supports the command class(es) we need.
	 */
	if (!(card->csd.cmdclass & CCC_BLOCK_READ))
		return -ENODEV;

	md = mmc_blk_alloc(card);
	if (IS_ERR(md))
		return PTR_ERR(md);

	err = mmc_blk_set_blksize(md, card);
	if (err)
		goto out;

	printk(KERN_INFO "%s: %s %s %lluKiB %s\n",
		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
		(unsigned long long)(get_capacity(md->disk) >> 1),
		md->read_only ? "(ro)" : "");

	mmc_set_drvdata(card, md);
	add_disk(md->disk);
	return 0;

 out:
	mmc_blk_put(md);

	return err;
}

static void mmc_blk_remove(struct mmc_card *card)
{
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
		int devidx;

		/* Stop new requests from getting into the queue */
		del_gendisk(md->disk);

		/* Then flush out any already in there */
		mmc_cleanup_queue(&md->queue);

		devidx = md->disk->first_minor >> MMC_SHIFT;
		__clear_bit(devidx, dev_use);

		mmc_blk_put(md);
	}
	mmc_set_drvdata(card, NULL);
}

#ifdef CONFIG_PM
static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
{
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
		mmc_queue_suspend(&md->queue);
	}
	return 0;
}

static int mmc_blk_resume(struct mmc_card *card)
{
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
		mmc_blk_set_blksize(md, card);
		mmc_queue_resume(&md->queue);
	}
	return 0;
}
#else
#define mmc_blk_suspend	NULL
#define mmc_blk_resume	NULL
#endif

static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmcblk",
	},
	.probe		= mmc_blk_probe,
	.remove		= mmc_blk_remove,
	.suspend	= mmc_blk_suspend,
	.resume		= mmc_blk_resume,
};

static int __init mmc_blk_init(void)
{
	int res = -ENOMEM;

	res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
	if (res)
		goto out;

	return mmc_register_driver(&mmc_driver);

 out:
	return res;
}

static void __exit mmc_blk_exit(void)
{
	mmc_unregister_driver(&mmc_driver);
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}

module_init(mmc_blk_init);
module_exit(mmc_blk_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");