/*
 * blkfront.c
 *
 * XenLinux virtual block device driver.
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 * Copyright (c) 2004, Christian Limpach
 * Copyright (c) 2004, Andrew Warfield
 * Copyright (c) 2005, Christopher Clark
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/scatterlist.h>

#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <xen/page.h>

#include <xen/interface/grant_table.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

#include <asm/xen/hypervisor.h>
enum blkif_state {
	BLKIF_STATE_DISCONNECTED,
	BLKIF_STATE_CONNECTED,
	BLKIF_STATE_SUSPENDED,
};
struct blk_shadow {
	struct blkif_request req;
	unsigned long request;
	unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};
static const struct block_device_operations xlvbd_block_fops;

#define BLK_RING_SIZE __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE)
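/*
 * Worked example, assuming 4 KiB pages: each ring slot holds the union of
 * a blkif request and response, and __RING_SIZE rounds the usable space of
 * the shared page down to a power of two, giving 32 slots per ring.  With
 * up to BLKIF_MAX_SEGMENTS_PER_REQUEST (11) page-sized segments per
 * request, that bounds the I/O in flight at 32 * 11 * 4 KiB = 1408 KiB.
 */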
/*
 * We have one of these per vbd, whether ide, scsi or 'other'.  They
 * hang in private_data off the gendisk structure. We may end up
 * putting all kinds of interesting stuff here :-)
 */
struct blkfront_info
{
	struct mutex mutex;
	struct xenbus_device *xbdev;
	struct gendisk *gd;
	int vdevice;
	blkif_vdev_t handle;
	enum blkif_state connected;
	int ring_ref;
	struct blkif_front_ring ring;
	struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int evtchn, irq;
	struct request_queue *rq;
	struct work_struct work;
	struct gnttab_free_callback callback;
	struct blk_shadow shadow[BLK_RING_SIZE];
	unsigned long shadow_free;
	int feature_barrier;
	int is_ready;
};
static DEFINE_SPINLOCK(blkif_io_lock);

static unsigned int nr_minors;
static unsigned long *minors;
static DEFINE_SPINLOCK(minor_lock);
#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
	(BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
#define GRANT_INVALID_REF	0

#define PARTS_PER_DISK		16
#define PARTS_PER_EXT_DISK	256

#define BLKIF_MAJOR(dev) ((dev)>>8)
#define BLKIF_MINOR(dev) ((dev) & 0xff)

#define EXT_SHIFT 28
#define EXTENDED (1<<EXT_SHIFT)
#define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))

#define DEV_NAME	"xvd"	/* name in /dev */
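/*
 * Example of the two vdevice encodings, for illustration: the classic
 * encoding packs an 8-bit major and an 8-bit minor, so a "virtual-device"
 * value of 51712 (0xca00: major 202, minor 0) names the whole disk xvda.
 * An extended id instead sets bit EXT_SHIFT and carries the minor in the
 * low 28 bits, allowing PARTS_PER_EXT_DISK (256) partitions per disk
 * rather than 16.
 */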
static int get_id_from_freelist(struct blkfront_info *info)
{
	unsigned long free = info->shadow_free;
	BUG_ON(free >= BLK_RING_SIZE);
	info->shadow_free = info->shadow[free].req.id;
	info->shadow[free].req.id = 0x0fffffee; /* debug */
	return free;
}
static void add_id_to_freelist(struct blkfront_info *info,
			       unsigned long id)
{
	info->shadow[id].req.id  = info->shadow_free;
	info->shadow[id].request = 0;
	info->shadow_free = id;
}
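/*
 * The free list above is threaded through the shadow entries themselves:
 * each free entry's req.id holds the index of the next free entry, with
 * shadow_free pointing at the head.  Allocation pops the head and poisons
 * the id with 0x0fffffee so stale reuse is easy to spot; freeing pushes
 * the entry back on the front.
 */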
static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;
	int rc;

	if (end > nr_minors) {
		unsigned long *bitmap, *old;

		bitmap = kzalloc(BITS_TO_LONGS(end) * sizeof(*bitmap),
				 GFP_KERNEL);
		if (bitmap == NULL)
			return -ENOMEM;

		spin_lock(&minor_lock);
		if (end > nr_minors) {
			old = minors;
			memcpy(bitmap, minors,
			       BITS_TO_LONGS(nr_minors) * sizeof(*bitmap));
			minors = bitmap;
			nr_minors = BITS_TO_LONGS(end) * BITS_PER_LONG;
		} else
			old = bitmap;
		spin_unlock(&minor_lock);
		kfree(old);
	}

	spin_lock(&minor_lock);
	if (find_next_bit(minors, end, minor) >= end) {
		for (; minor < end; ++minor)
			__set_bit(minor, minors);
		rc = 0;
	} else
		rc = -EBUSY;
	spin_unlock(&minor_lock);

	return rc;
}
static void xlbd_release_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;

	BUG_ON(end > nr_minors);
	spin_lock(&minor_lock);
	for (; minor < end; ++minor)
		__clear_bit(minor, minors);
	spin_unlock(&minor_lock);
}
static void blkif_restart_queue_callback(void *arg)
{
	struct blkfront_info *info = (struct blkfront_info *)arg;
	schedule_work(&info->work);
}
static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
{
	/* We don't have real geometry info, but let's at least return
	   values consistent with the size of the device */
	sector_t nsect = get_capacity(bd->bd_disk);
	sector_t cylinders = nsect;

	hg->heads = 0xff;
	hg->sectors = 0x3f;
	sector_div(cylinders, hg->heads * hg->sectors);
	hg->cylinders = cylinders;
	if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
		hg->cylinders = 0xffff;
	return 0;
}
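/*
 * Worked example, for illustration: a 16 GiB disk has nsect = 33554432
 * sectors; with 255 heads and 63 sectors per track, the division above
 * gives 2088 cylinders, and 2089 * 255 * 63 >= nsect, so the clamp is not
 * taken.  The 0xffff clamp only kicks in once the true cylinder count no
 * longer fits the 16-bit field, i.e. for disks above roughly 500 GiB.
 */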
static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
		       unsigned command, unsigned long argument)
{
	struct blkfront_info *info = bdev->bd_disk->private_data;
	int i;

	dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n",
		command, (long)argument);

	switch (command) {
	case CDROMMULTISESSION:
		dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n");
		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
			if (put_user(0, (char __user *)(argument + i)))
				return -EFAULT;
		return 0;

	case CDROM_GET_CAPABILITY: {
		struct gendisk *gd = info->gd;
		if (gd->flags & GENHD_FL_CD)
			return 0;
		return -EINVAL;
	}

	default:
		/*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
		  command);*/
		return -EINVAL; /* same return as native Linux */
	}

	return 0;
}
/*
 * blkif_queue_request
 *
 * request block io
 *
 * id: for guest use only.
 * operation: BLKIF_OP_{READ,WRITE,PROBE}
 * buffer: buffer to read/write into. this should be a
 *   virtual address in the guest os.
 */
static int blkif_queue_request(struct request *req)
{
	struct blkfront_info *info = req->rq_disk->private_data;
	unsigned long buffer_mfn;
	struct blkif_request *ring_req;
	unsigned long id;
	unsigned int fsect, lsect;
	int i, ref;
	grant_ref_t gref_head;
	struct scatterlist *sg;

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
		return 1;

	if (gnttab_alloc_grant_references(
		BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
		gnttab_request_free_callback(
			&info->callback,
			blkif_restart_queue_callback,
			info,
			BLKIF_MAX_SEGMENTS_PER_REQUEST);
		return 1;
	}

	/* Fill out a communications ring structure. */
	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
	id = get_id_from_freelist(info);
	info->shadow[id].request = (unsigned long)req;

	ring_req->id = id;
	ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req);
	ring_req->handle = info->handle;

	ring_req->operation = rq_data_dir(req) ?
		BLKIF_OP_WRITE : BLKIF_OP_READ;
	if (req->cmd_flags & REQ_HARDBARRIER)
		ring_req->operation = BLKIF_OP_WRITE_BARRIER;

	ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
	BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);

	for_each_sg(info->sg, sg, ring_req->nr_segments, i) {
		buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg)));
		fsect = sg->offset >> 9;
		lsect = fsect + (sg->length >> 9) - 1;
		/* install a grant reference. */
		ref = gnttab_claim_grant_reference(&gref_head);
		BUG_ON(ref == -ENOSPC);

		gnttab_grant_foreign_access_ref(
				ref,
				info->xbdev->otherend_id,
				buffer_mfn,
				rq_data_dir(req));

		info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
		ring_req->seg[i] =
				(struct blkif_request_segment) {
					.gref       = ref,
					.first_sect = fsect,
					.last_sect  = lsect };
	}

	info->ring.req_prod_pvt++;

	/* Keep a private copy so we can reissue requests when recovering. */
	info->shadow[id].req = *ring_req;

	gnttab_free_grant_references(gref_head);

	return 0;
}
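/*
 * Segment addressing example, for illustration: fsect/lsect are 512-byte
 * sector offsets within the granted page, so a scatterlist entry covering
 * a full 4 KiB page maps to fsect = 0, lsect = 7, while one covering
 * bytes 1024-2047 of the page maps to fsect = 2, lsect = 3.
 */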
static inline void flush_requests(struct blkfront_info *info)
{
	int notify;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);

	if (notify)
		notify_remote_via_irq(info->irq);
}
/*
 * do_blkif_request
 *  read a block; request is in a request queue
 */
static void do_blkif_request(struct request_queue *rq)
{
	struct blkfront_info *info = NULL;
	struct request *req;
	int queued;

	pr_debug("Entered do_blkif_request\n");

	queued = 0;

	while ((req = blk_peek_request(rq)) != NULL) {
		info = req->rq_disk->private_data;

		if (RING_FULL(&info->ring))
			goto wait;

		blk_start_request(req);

		if (req->cmd_type != REQ_TYPE_FS) {
			__blk_end_request_all(req, -EIO);
			continue;
		}

		pr_debug("do_blk_req %p: cmd %p, sec %lx, "
			 "(%u/%u) buffer:%p [%s]\n",
			 req, req->cmd, (unsigned long)blk_rq_pos(req),
			 blk_rq_cur_sectors(req), blk_rq_sectors(req),
			 req->buffer, rq_data_dir(req) ? "write" : "read");

		if (blkif_queue_request(req)) {
			blk_requeue_request(rq, req);
wait:
			/* Avoid pointless unplugs. */
			blk_stop_queue(rq);
			break;
		}

		queued++;
	}

	if (queued != 0)
		flush_requests(info);
}
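/*
 * Note on the flow above: when the shared ring is full, or a request
 * cannot get grant references, the queue is stopped rather than polled;
 * it is restarted from kick_pending_request_queues() once the interrupt
 * handler or the grant-table free callback has released resources.
 */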
static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
{
	struct request_queue *rq;

	rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
	if (rq == NULL)
		return -1;

	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);

	/* Hard sector size and max sectors impersonate the equiv. hardware. */
	blk_queue_logical_block_size(rq, sector_size);
	blk_queue_max_hw_sectors(rq, 512);

	/* Each segment in a request is up to an aligned page in size. */
	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
	blk_queue_max_segment_size(rq, PAGE_SIZE);

	/* Ensure a merged request will fit in a single I/O ring slot. */
	blk_queue_max_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);

	/* Make sure buffer addresses are sector-aligned. */
	blk_queue_dma_alignment(rq, 511);

	/* Make sure we don't use bounce buffers. */
	blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);

	gd->queue = rq;

	return 0;
}
static int xlvbd_barrier(struct blkfront_info *info)
{
	int err;

	err = blk_queue_ordered(info->rq,
				info->feature_barrier ? QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE);

	if (err)
		return err;

	printk(KERN_INFO "blkfront: %s: barriers %s\n",
	       info->gd->disk_name,
	       info->feature_barrier ? "enabled" : "disabled");
	return 0;
}
static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
			       struct blkfront_info *info,
			       u16 vdisk_info, u16 sector_size)
{
	struct gendisk *gd;
	int nr_minors = 1;
	int err = -ENODEV;
	unsigned int offset;
	int minor;
	int nr_parts;

	BUG_ON(info->gd != NULL);
	BUG_ON(info->rq != NULL);

	if ((info->vdevice>>EXT_SHIFT) > 1) {
		/* this is above the extended range; something is wrong */
		printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", info->vdevice);
		return -ENODEV;
	}

	if (!VDEV_IS_EXTENDED(info->vdevice)) {
		minor = BLKIF_MINOR(info->vdevice);
		nr_parts = PARTS_PER_DISK;
	} else {
		minor = BLKIF_MINOR_EXT(info->vdevice);
		nr_parts = PARTS_PER_EXT_DISK;
	}

	if ((minor % nr_parts) == 0)
		nr_minors = nr_parts;

	err = xlbd_reserve_minors(minor, nr_minors);
	if (err)
		goto out;
	err = -ENODEV;

	gd = alloc_disk(nr_minors);
	if (gd == NULL)
		goto release;

	offset = minor / nr_parts;

	if (nr_minors > 1) {
		if (offset < 26)
			sprintf(gd->disk_name, "%s%c", DEV_NAME, 'a' + offset);
		else
			sprintf(gd->disk_name, "%s%c%c", DEV_NAME,
				'a' + ((offset / 26)-1), 'a' + (offset % 26));
	} else {
		if (offset < 26)
			sprintf(gd->disk_name, "%s%c%d", DEV_NAME,
				'a' + offset,
				minor & (nr_parts - 1));
		else
			sprintf(gd->disk_name, "%s%c%c%d", DEV_NAME,
				'a' + ((offset / 26) - 1),
				'a' + (offset % 26),
				minor & (nr_parts - 1));
	}

	gd->major = XENVBD_MAJOR;
	gd->first_minor = minor;
	gd->fops = &xlvbd_block_fops;
	gd->private_data = info;
	gd->driverfs_dev = &(info->xbdev->dev);
	set_capacity(gd, capacity);

	if (xlvbd_init_blk_queue(gd, sector_size)) {
		del_gendisk(gd);
		goto release;
	}

	info->rq = gd->queue;
	info->gd = gd;

	if (info->feature_barrier)
		xlvbd_barrier(info);

	if (vdisk_info & VDISK_READONLY)
		set_disk_ro(gd, 1);

	if (vdisk_info & VDISK_REMOVABLE)
		gd->flags |= GENHD_FL_REMOVABLE;

	if (vdisk_info & VDISK_CDROM)
		gd->flags |= GENHD_FL_CD;

	return 0;

 release:
	xlbd_release_minors(minor, nr_minors);
 out:
	return err;
}
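/*
 * Naming examples, for illustration: classic vdevice minor 0 is whole
 * disk "xvda" (16 minors reserved; the partition code names "xvda1" and
 * friends), and minor 16 is "xvdb".  A classic vdevice that names a
 * partition directly, e.g. minor 17, becomes "xvdb1".  Offsets of 26 and
 * up use two letters, so offset 26 is "xvdaa".
 */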
static void xlvbd_release_gendisk(struct blkfront_info *info)
{
	unsigned int minor, nr_minors;
	unsigned long flags;

	if (info->rq == NULL)
		return;

	spin_lock_irqsave(&blkif_io_lock, flags);

	/* No more blkif_request(). */
	blk_stop_queue(info->rq);

	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&info->callback);
	spin_unlock_irqrestore(&blkif_io_lock, flags);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_scheduled_work();

	del_gendisk(info->gd);

	minor = info->gd->first_minor;
	nr_minors = info->gd->minors;
	xlbd_release_minors(minor, nr_minors);

	blk_cleanup_queue(info->rq);
	info->rq = NULL;

	put_disk(info->gd);
	info->gd = NULL;
}
static void kick_pending_request_queues(struct blkfront_info *info)
{
	if (!RING_FULL(&info->ring)) {
		/* Re-enable calldowns. */
		blk_start_queue(info->rq);
		/* Kick things off immediately. */
		do_blkif_request(info->rq);
	}
}
static void blkif_restart_queue(struct work_struct *work)
{
	struct blkfront_info *info = container_of(work, struct blkfront_info, work);

	spin_lock_irq(&blkif_io_lock);
	if (info->connected == BLKIF_STATE_CONNECTED)
		kick_pending_request_queues(info);
	spin_unlock_irq(&blkif_io_lock);
}
static void blkif_free(struct blkfront_info *info, int suspend)
{
	/* Prevent new requests being issued until we fix things up. */
	spin_lock_irq(&blkif_io_lock);
	info->connected = suspend ?
		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
	/* No more blkif_request(). */
	if (info->rq)
		blk_stop_queue(info->rq);
	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&info->callback);
	spin_unlock_irq(&blkif_io_lock);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_scheduled_work();

	/* Free resources associated with old device channel. */
	if (info->ring_ref != GRANT_INVALID_REF) {
		gnttab_end_foreign_access(info->ring_ref, 0,
					  (unsigned long)info->ring.sring);
		info->ring_ref = GRANT_INVALID_REF;
		info->ring.sring = NULL;
	}
	if (info->irq)
		unbind_from_irqhandler(info->irq, info);
	info->evtchn = info->irq = 0;
}
static void blkif_completion(struct blk_shadow *s)
{
	int i;
	for (i = 0; i < s->req.nr_segments; i++)
		gnttab_end_foreign_access(s->req.seg[i].gref, 0, 0UL);
}
static irqreturn_t blkif_interrupt(int irq, void *dev_id)
{
	struct request *req;
	struct blkif_response *bret;
	RING_IDX i, rp;
	unsigned long flags;
	struct blkfront_info *info = (struct blkfront_info *)dev_id;
	int error;

	spin_lock_irqsave(&blkif_io_lock, flags);

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
		spin_unlock_irqrestore(&blkif_io_lock, flags);
		return IRQ_HANDLED;
	}

 again:
	rp = info->ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	for (i = info->ring.rsp_cons; i != rp; i++) {
		unsigned long id;

		bret = RING_GET_RESPONSE(&info->ring, i);
		id   = bret->id;
		req  = (struct request *)info->shadow[id].request;

		blkif_completion(&info->shadow[id]);

		add_id_to_freelist(info, id);

		error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
		switch (bret->operation) {
		case BLKIF_OP_WRITE_BARRIER:
			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
				printk(KERN_WARNING "blkfront: %s: write barrier op failed\n",
				       info->gd->disk_name);
				error = -EOPNOTSUPP;
				info->feature_barrier = 0;
				xlvbd_barrier(info);
			}
			/* fall through */
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
			if (unlikely(bret->status != BLKIF_RSP_OKAY))
				dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
					"request: %x\n", bret->status);

			__blk_end_request_all(req, error);
			break;
		default:
			BUG();
		}
	}

	info->ring.rsp_cons = i;

	if (i != info->ring.req_prod_pvt) {
		int more_to_do;
		RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
		if (more_to_do)
			goto again;
	} else
		info->ring.sring->rsp_event = i + 1;

	kick_pending_request_queues(info);

	spin_unlock_irqrestore(&blkif_io_lock, flags);

	return IRQ_HANDLED;
}
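/*
 * Note on event batching above: rsp_event tells the backend at which
 * response index it should send the next notification.  If new responses
 * arrived while the loop was running, RING_FINAL_CHECK_FOR_RESPONSES()
 * spots them and we loop again instead of waiting for another interrupt.
 */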
static int setup_blkring(struct xenbus_device *dev,
			 struct blkfront_info *info)
{
	struct blkif_sring *sring;
	int err;

	info->ring_ref = GRANT_INVALID_REF;

	sring = (struct blkif_sring *)__get_free_page(GFP_NOIO | __GFP_HIGH);
	if (!sring) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
	}
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);

	sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);

	err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
	if (err < 0) {
		free_page((unsigned long)sring);
		info->ring.sring = NULL;
		goto fail;
	}
	info->ring_ref = err;

	err = xenbus_alloc_evtchn(dev, &info->evtchn);
	if (err)
		goto fail;

	err = bind_evtchn_to_irqhandler(info->evtchn,
					blkif_interrupt,
					IRQF_SAMPLE_RANDOM, "blkif", info);
	if (err <= 0) {
		xenbus_dev_fatal(dev, err,
				 "bind_evtchn_to_irqhandler failed");
		goto fail;
	}
	info->irq = err;

	return 0;
fail:
	blkif_free(info, 0);
	return err;
}
/* Common code used when first setting up, and when resuming. */
static int talk_to_blkback(struct xenbus_device *dev,
			   struct blkfront_info *info)
{
	const char *message = NULL;
	struct xenbus_transaction xbt;
	int err;

	/* Create shared ring, alloc event channel. */
	err = setup_blkring(dev, info);
	if (err)
		goto out;

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_blkring;
	}

	err = xenbus_printf(xbt, dev->nodename,
			    "ring-ref", "%u", info->ring_ref);
	if (err) {
		message = "writing ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename,
			    "event-channel", "%u", info->evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
			    XEN_IO_PROTO_ABI_NATIVE);
	if (err) {
		message = "writing protocol";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_blkring;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_fatal(dev, err, "%s", message);
 destroy_blkring:
	blkif_free(info, 0);
 out:
	return err;
}
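/*
 * Resulting xenstore layout, for illustration (the node path is an
 * example): a frontend at device/vbd/51712 ends up with
 *
 *   device/vbd/51712/ring-ref      = "<grant ref of the shared page>"
 *   device/vbd/51712/event-channel = "<event channel port>"
 *   device/vbd/51712/protocol      = "<native ABI name, e.g. x86_64-abi>"
 *
 * which the backend reads in order to map the ring and bind the other
 * end of the event channel.
 */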
/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffer for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to
 * Initialised state.
 */
static int blkfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err, vdevice, i;
	struct blkfront_info *info;

	/* FIXME: Use dynamic device id if this is not set. */
	err = xenbus_scanf(XBT_NIL, dev->nodename,
			   "virtual-device", "%i", &vdevice);
	if (err != 1) {
		/* go looking in the extended area instead */
		err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext",
				   "%i", &vdevice);
		if (err != 1) {
			xenbus_dev_fatal(dev, err, "reading virtual-device");
			return err;
		}
	}

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
		return -ENOMEM;
	}

	mutex_init(&info->mutex);
	info->xbdev = dev;
	info->vdevice = vdevice;
	info->connected = BLKIF_STATE_DISCONNECTED;
	INIT_WORK(&info->work, blkif_restart_queue);

	for (i = 0; i < BLK_RING_SIZE; i++)
		info->shadow[i].req.id = i+1;
	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;

	/* Front end dir is a number, which is used as the id. */
	info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
	dev_set_drvdata(&dev->dev, info);

	err = talk_to_blkback(dev, info);
	if (err) {
		kfree(info);
		dev_set_drvdata(&dev->dev, NULL);
		return err;
	}

	return 0;
}
*info
)
861 struct blkif_request
*req
;
862 struct blk_shadow
*copy
;
865 /* Stage 1: Make a safe copy of the shadow state. */
866 copy
= kmalloc(sizeof(info
->shadow
),
867 GFP_NOIO
| __GFP_REPEAT
| __GFP_HIGH
);
870 memcpy(copy
, info
->shadow
, sizeof(info
->shadow
));
872 /* Stage 2: Set up free list. */
873 memset(&info
->shadow
, 0, sizeof(info
->shadow
));
874 for (i
= 0; i
< BLK_RING_SIZE
; i
++)
875 info
->shadow
[i
].req
.id
= i
+1;
876 info
->shadow_free
= info
->ring
.req_prod_pvt
;
877 info
->shadow
[BLK_RING_SIZE
-1].req
.id
= 0x0fffffff;
879 /* Stage 3: Find pending requests and requeue them. */
880 for (i
= 0; i
< BLK_RING_SIZE
; i
++) {
882 if (copy
[i
].request
== 0)
885 /* Grab a request slot and copy shadow state into it. */
886 req
= RING_GET_REQUEST(&info
->ring
, info
->ring
.req_prod_pvt
);
889 /* We get a new request id, and must reset the shadow state. */
890 req
->id
= get_id_from_freelist(info
);
891 memcpy(&info
->shadow
[req
->id
], ©
[i
], sizeof(copy
[i
]));
893 /* Rewrite any grant references invalidated by susp/resume. */
894 for (j
= 0; j
< req
->nr_segments
; j
++)
895 gnttab_grant_foreign_access_ref(
897 info
->xbdev
->otherend_id
,
898 pfn_to_mfn(info
->shadow
[req
->id
].frame
[j
]),
901 info
->shadow
[req
->id
].request
));
902 info
->shadow
[req
->id
].req
= *req
;
904 info
->ring
.req_prod_pvt
++;
909 xenbus_switch_state(info
->xbdev
, XenbusStateConnected
);
911 spin_lock_irq(&blkif_io_lock
);
913 /* Now safe for us to use the shared ring */
914 info
->connected
= BLKIF_STATE_CONNECTED
;
916 /* Send off requeued requests */
917 flush_requests(info
);
919 /* Kick any other new requests queued since we resumed */
920 kick_pending_request_queues(info
);
922 spin_unlock_irq(&blkif_io_lock
);
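/*
 * Note on stage 3 above: a save/restore changes the domain's
 * pseudo-physical-to-machine mappings, so grant entries recorded in the
 * old requests point at stale machine frames.  Re-granting from the saved
 * pfn (via pfn_to_mfn on the shadow's frame array) fixes each segment up
 * before the requests are reissued.
 */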
/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our blkif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int blkfront_resume(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
	int err;

	dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);

	blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);

	err = talk_to_blkback(dev, info);
	if (info->connected == BLKIF_STATE_SUSPENDED && !err)
		err = blkif_recover(info);

	return err;
}
static void
blkfront_closing(struct blkfront_info *info)
{
	struct xenbus_device *xbdev = info->xbdev;
	struct block_device *bdev = NULL;

	mutex_lock(&info->mutex);

	if (xbdev->state == XenbusStateClosing) {
		mutex_unlock(&info->mutex);
		return;
	}

	if (info->gd)
		bdev = bdget_disk(info->gd, 0);

	mutex_unlock(&info->mutex);

	if (bdev == NULL) {
		xenbus_frontend_closed(xbdev);
		return;
	}

	mutex_lock(&bdev->bd_mutex);

	if (bdev->bd_openers) {
		xenbus_dev_error(xbdev, -EBUSY,
				 "Device in use; refusing to close");
		xenbus_switch_state(xbdev, XenbusStateClosing);
	} else {
		xlvbd_release_gendisk(info);
		xenbus_frontend_closed(xbdev);
	}

	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
}
/*
 * Invoked when the backend is finally 'ready' (and has produced
 * the details about the physical device - #sectors, size, etc).
 */
static void blkfront_connect(struct blkfront_info *info)
{
	unsigned long long sectors;
	unsigned long sector_size;
	unsigned int binfo;
	int err;

	switch (info->connected) {
	case BLKIF_STATE_CONNECTED:
		/*
		 * Potentially, the back-end may be signalling
		 * a capacity change; update the capacity.
		 */
		err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
				   "sectors", "%Lu", &sectors);
		if (XENBUS_EXIST_ERR(err))
			return;
		printk(KERN_INFO "Setting capacity to %Lu\n",
		       sectors);
		set_capacity(info->gd, sectors);
		revalidate_disk(info->gd);

		/* fall through */
	case BLKIF_STATE_SUSPENDED:
		return;
	}

	dev_dbg(&info->xbdev->dev, "%s:%s.\n",
		__func__, info->xbdev->otherend);

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "sectors", "%llu", &sectors,
			    "info", "%u", &binfo,
			    "sector-size", "%lu", &sector_size,
			    NULL);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err,
				 "reading backend fields at %s",
				 info->xbdev->otherend);
		return;
	}

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "feature-barrier", "%d", &info->feature_barrier,
			    NULL);
	if (err)
		info->feature_barrier = 0;

	err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
				 info->xbdev->otherend);
		return;
	}

	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	/* Kick pending requests. */
	spin_lock_irq(&blkif_io_lock);
	info->connected = BLKIF_STATE_CONNECTED;
	kick_pending_request_queues(info);
	spin_unlock_irq(&blkif_io_lock);

	add_disk(info->gd);

	info->is_ready = 1;
}
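/*
 * Backend fields read above, for illustration (the backend path shown is
 * an assumption about a typical layout): a backend at something like
 * backend/vbd/<frontend-domid>/<handle> publishes "sectors" (capacity in
 * 512-byte units), "info" (VDISK_* flags), "sector-size", and optionally
 * "feature-barrier"; if the feature-barrier key is absent, barriers are
 * simply treated as unsupported.
 */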
/*
 * Callback received when the backend's state changes.
 */
static void blkback_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "blkfront:blkback_changed to state %d.\n", backend_state);

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitWait:
	case XenbusStateInitialised:
	case XenbusStateUnknown:
	case XenbusStateClosed:
		break;

	case XenbusStateConnected:
		blkfront_connect(info);
		break;

	case XenbusStateClosing:
		blkfront_closing(info);
		break;
	}
}
static int blkfront_remove(struct xenbus_device *xbdev)
{
	struct blkfront_info *info = dev_get_drvdata(&xbdev->dev);
	struct block_device *bdev = NULL;
	struct gendisk *disk;

	dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);

	blkif_free(info, 0);

	mutex_lock(&info->mutex);

	disk = info->gd;
	if (disk)
		bdev = bdget_disk(disk, 0);

	info->xbdev = NULL;
	mutex_unlock(&info->mutex);

	if (!bdev) {
		kfree(info);
		return 0;
	}

	/*
	 * The xbdev was removed before we reached the Closed
	 * state. See if it's safe to remove the disk. If the bdev
	 * isn't closed yet, we let release take care of it.
	 */

	mutex_lock(&bdev->bd_mutex);
	info = disk->private_data;

	if (info && !bdev->bd_openers) {
		xlvbd_release_gendisk(info);
		disk->private_data = NULL;
		kfree(info);
	}

	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);

	return 0;
}
static int blkfront_is_ready(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);

	return info->is_ready && info->xbdev;
}
static int blkif_open(struct block_device *bdev, fmode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;
	struct blkfront_info *info;
	int err = 0;

	lock_kernel();

	info = disk->private_data;
	if (!info) {
		/* xbdev gone */
		err = -ERESTARTSYS;
		goto out;
	}

	mutex_lock(&info->mutex);

	if (!info->gd)
		/* xbdev is closed */
		err = -ERESTARTSYS;

	mutex_unlock(&info->mutex);

out:
	unlock_kernel();
	return err;
}
static int blkif_release(struct gendisk *disk, fmode_t mode)
{
	struct blkfront_info *info = disk->private_data;
	struct block_device *bdev;
	struct xenbus_device *xbdev;

	lock_kernel();

	bdev = bdget_disk(disk, 0);
	bdput(bdev);

	if (bdev->bd_openers)
		goto out;

	/*
	 * Check if we have been instructed to close. We will have
	 * deferred this request, because the bdev was still open.
	 */

	mutex_lock(&info->mutex);
	xbdev = info->xbdev;

	if (xbdev && xbdev->state == XenbusStateClosing) {
		/* pending switch to state closed */
		xlvbd_release_gendisk(info);
		xenbus_frontend_closed(info->xbdev);
	}

	mutex_unlock(&info->mutex);

	if (!xbdev) {
		/* sudden device removal */
		xlvbd_release_gendisk(info);
		disk->private_data = NULL;
		kfree(info);
	}

out:
	unlock_kernel();
	return 0;
}
static const struct block_device_operations xlvbd_block_fops =
{
	.owner = THIS_MODULE,
	.open = blkif_open,
	.release = blkif_release,
	.getgeo = blkif_getgeo,
	.ioctl = blkif_ioctl,
};
static const struct xenbus_device_id blkfront_ids[] = {
	{ "vbd" },
	{ "" }
};
static struct xenbus_driver blkfront = {
	.name = "vbd",
	.owner = THIS_MODULE,
	.ids = blkfront_ids,
	.probe = blkfront_probe,
	.remove = blkfront_remove,
	.resume = blkfront_resume,
	.otherend_changed = blkback_changed,
	.is_ready = blkfront_is_ready,
};
static int __init xlblk_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
		printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n",
		       XENVBD_MAJOR, DEV_NAME);
		return -ENODEV;
	}

	return xenbus_register_frontend(&blkfront);
}
module_init(xlblk_init);
static void __exit xlblk_exit(void)
{
	xenbus_unregister_driver(&blkfront);
}
module_exit(xlblk_exit);
MODULE_DESCRIPTION("Xen virtual block device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);
MODULE_ALIAS("xen:vbd");
MODULE_ALIAS("xenblk");