/* A simple block driver for lguest.
 *
 * Copyright 2006 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
20 #include <linux/init.h>
21 #include <linux/types.h>
22 #include <linux/blkdev.h>
23 #include <linux/interrupt.h>
24 #include <linux/lguest_bus.h>
/* Suffix for the next disk name: first device is "lgba", then "lgbb", etc.
 * (see the sprintf in lguestblk_probe). */
static char next_block_index = 'a';
32 /* The disk structure for the kernel. */
35 /* The major number for this disk. */
39 unsigned long phys_addr
;
40 /* The mapped block page. */
41 struct lguest_block_page
*lb_page
;
43 /* We only have a single request outstanding at a time. */
44 struct lguest_dma dma
;
48 /* Jens gave me this nice helper to end all chunks of a request. */
49 static void end_entire_request(struct request
*req
, int uptodate
)
51 if (end_that_request_first(req
, uptodate
, req
->hard_nr_sectors
))
53 add_disk_randomness(req
->rq_disk
);
54 blkdev_dequeue_request(req
);
55 end_that_request_last(req
, uptodate
);
58 static irqreturn_t
lgb_irq(int irq
, void *_bd
)
60 struct blockdev
*bd
= _bd
;
64 pr_debug("No work!\n");
68 if (!bd
->lb_page
->result
) {
69 pr_debug("No result!\n");
73 spin_lock_irqsave(&bd
->lock
, flags
);
74 end_entire_request(bd
->req
, bd
->lb_page
->result
== 1);
77 blk_start_queue(bd
->disk
->queue
);
78 spin_unlock_irqrestore(&bd
->lock
, flags
);
82 static unsigned int req_to_dma(struct request
*req
, struct lguest_dma
*dma
)
84 unsigned int i
= 0, idx
, len
= 0;
87 rq_for_each_bio(bio
, req
) {
89 bio_for_each_segment(bvec
, bio
, idx
) {
90 BUG_ON(i
== LGUEST_MAX_DMA_SECTIONS
);
91 BUG_ON(!bvec
->bv_len
);
92 dma
->addr
[i
] = page_to_phys(bvec
->bv_page
)
94 dma
->len
[i
] = bvec
->bv_len
;
99 if (i
< LGUEST_MAX_DMA_SECTIONS
)
104 static void empty_dma(struct lguest_dma
*dma
)
109 static void setup_req(struct blockdev
*bd
,
110 int type
, struct request
*req
, struct lguest_dma
*dma
)
112 bd
->lb_page
->type
= type
;
113 bd
->lb_page
->sector
= req
->sector
;
114 bd
->lb_page
->result
= 0;
116 bd
->lb_page
->bytes
= req_to_dma(req
, dma
);
119 static void do_write(struct blockdev
*bd
, struct request
*req
)
121 struct lguest_dma send
;
123 pr_debug("lgb: WRITE sector %li\n", (long)req
->sector
);
124 setup_req(bd
, 1, req
, &send
);
126 lguest_send_dma(bd
->phys_addr
, &send
);
129 static void do_read(struct blockdev
*bd
, struct request
*req
)
131 struct lguest_dma ping
;
133 pr_debug("lgb: READ sector %li\n", (long)req
->sector
);
134 setup_req(bd
, 0, req
, &bd
->dma
);
137 lguest_send_dma(bd
->phys_addr
, &ping
);
140 static void do_lgb_request(struct request_queue
*q
)
146 req
= elv_next_request(q
);
150 bd
= req
->rq_disk
->private_data
;
151 /* Sometimes we get repeated requests after blk_stop_queue. */
155 if (!blk_fs_request(req
)) {
156 pr_debug("Got non-command 0x%08x\n", req
->cmd_type
);
158 end_entire_request(req
, 0);
162 if (rq_data_dir(req
) == WRITE
)
167 /* Wait for interrupt to tell us it's done. */
171 static struct block_device_operations lguestblk_fops
= {
172 .owner
= THIS_MODULE
,
175 static int lguestblk_probe(struct lguest_device
*lgdev
)
179 int irqflags
= IRQF_SHARED
;
181 bd
= kmalloc(sizeof(*bd
), GFP_KERNEL
);
185 spin_lock_init(&bd
->lock
);
186 bd
->irq
= lgdev_irq(lgdev
);
188 bd
->dma
.used_len
= 0;
190 bd
->phys_addr
= (lguest_devices
[lgdev
->index
].pfn
<< PAGE_SHIFT
);
192 bd
->lb_page
= lguest_map(bd
->phys_addr
, 1);
198 bd
->major
= register_blkdev(0, "lguestblk");
204 bd
->disk
= alloc_disk(1);
207 goto out_unregister_blkdev
;
210 bd
->disk
->queue
= blk_init_queue(do_lgb_request
, &bd
->lock
);
211 if (!bd
->disk
->queue
) {
216 /* We can only handle a certain number of sg entries */
217 blk_queue_max_hw_segments(bd
->disk
->queue
, LGUEST_MAX_DMA_SECTIONS
);
218 /* Buffers must not cross page boundaries */
219 blk_queue_segment_boundary(bd
->disk
->queue
, PAGE_SIZE
-1);
221 sprintf(bd
->disk
->disk_name
, "lgb%c", next_block_index
++);
222 if (lguest_devices
[lgdev
->index
].features
& LGUEST_DEVICE_F_RANDOMNESS
)
223 irqflags
|= IRQF_SAMPLE_RANDOM
;
224 err
= request_irq(bd
->irq
, lgb_irq
, irqflags
, bd
->disk
->disk_name
, bd
);
226 goto out_cleanup_queue
;
228 err
= lguest_bind_dma(bd
->phys_addr
, &bd
->dma
, 1, bd
->irq
);
232 bd
->disk
->major
= bd
->major
;
233 bd
->disk
->first_minor
= 0;
234 bd
->disk
->private_data
= bd
;
235 bd
->disk
->fops
= &lguestblk_fops
;
236 /* This is initialized to the disk size by the other end. */
237 set_capacity(bd
->disk
, bd
->lb_page
->num_sectors
);
240 printk(KERN_INFO
"%s: device %i at major %d\n",
241 bd
->disk
->disk_name
, lgdev
->index
, bd
->major
);
247 free_irq(bd
->irq
, bd
);
249 blk_cleanup_queue(bd
->disk
->queue
);
252 out_unregister_blkdev
:
253 unregister_blkdev(bd
->major
, "lguestblk");
255 lguest_unmap(bd
->lb_page
);
261 static struct lguest_driver lguestblk_drv
= {
263 .owner
= THIS_MODULE
,
264 .device_type
= LGUEST_DEVICE_T_BLOCK
,
265 .probe
= lguestblk_probe
,
268 static __init
int lguestblk_init(void)
270 return register_lguest_driver(&lguestblk_drv
);
272 module_init(lguestblk_init
);
274 MODULE_DESCRIPTION("Lguest block driver");
275 MODULE_LICENSE("GPL");